xref: /openbsd/sys/arch/hppa/hppa/pmap.c (revision 38cdd850)
1 /*	$OpenBSD: pmap.c,v 1.181 2023/01/24 16:51:05 kettenis Exp $	*/
2 
3 /*
4  * Copyright (c) 1998-2004 Michael Shalayeff
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
20  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
25  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26  * THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 /*
29  * References:
30  * 1. PA7100LC ERS, Hewlett-Packard, March 30 1999, Public version 1.0
31  * 2. PA7300LC ERS, Hewlett-Packard, March 18 1996, Version 1.0
32  *
33  */
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/atomic.h>
38 #include <sys/proc.h>
39 #include <sys/user.h>
40 #include <sys/pool.h>
41 #include <sys/extent.h>
42 
43 #include <uvm/uvm_extern.h>
44 
45 #include <machine/cpufunc.h>
46 #include <machine/iomod.h>
47 
48 #ifdef PMAPDEBUG
49 #define	DPRINTF(l,s)	do {		\
50 	if ((pmapdebug & (l)) == (l))	\
51 		printf s;		\
52 } while(0)
53 #define	PDB_FOLLOW	0x00000001
54 #define	PDB_INIT	0x00000002
55 #define	PDB_ENTER	0x00000004
56 #define	PDB_REMOVE	0x00000008
57 #define	PDB_CREATE	0x00000010
58 #define	PDB_PTPAGE	0x00000020
59 #define	PDB_CACHE	0x00000040
60 #define	PDB_BITS	0x00000080
61 #define	PDB_COLLECT	0x00000100
62 #define	PDB_PROTECT	0x00000200
63 #define	PDB_EXTRACT	0x00000400
64 #define	PDB_VP		0x00000800
65 #define	PDB_PV		0x00001000
66 #define	PDB_PARANOIA	0x00002000
67 #define	PDB_WIRING	0x00004000
68 #define	PDB_PMAP	0x00008000
69 #define	PDB_STEAL	0x00010000
70 #define	PDB_PHYS	0x00020000
71 #define	PDB_POOL	0x00040000
72 int pmapdebug = 0
73 /*	| PDB_INIT */
74 /*	| PDB_FOLLOW */
75 /*	| PDB_VP */
76 /*	| PDB_PV */
77 /*	| PDB_ENTER */
78 /*	| PDB_REMOVE */
79 /*	| PDB_STEAL */
80 /*	| PDB_PROTECT */
81 /*	| PDB_PHYS */
82 	;
83 #else
84 #define	DPRINTF(l,s)	/* */
85 #endif
86 
87 paddr_t physical_steal, physical_end;
88 
89 int		pmap_hptsize = 16 * PAGE_SIZE;	/* patchable */
90 vaddr_t		pmap_hpt;
91 
92 struct pmap	kernel_pmap_store;
93 int		hppa_sid_max = HPPA_SID_MAX;
94 struct pool	pmap_pmap_pool;
95 struct pool	pmap_pv_pool;
96 int		pmap_pvlowat = 252;
97 int 		pmap_initialized;
98 
99 u_int	hppa_prot[8];
100 
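/*
 * Select the space id for a va: anything in the top quadrant
 * (0xc0000000 and up) is kernel space, everything else uses the
 * pmap's own space.
 */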
101 #define	pmap_sid(pmap, va) \
102 	(((va & 0xc0000000) != 0xc0000000)? pmap->pmap_space : HPPA_SID_KERNEL)
103 
104 static inline int
105 pmap_pvh_attrs(pt_entry_t pte)
106 {
107 	int attrs = 0;
108 	if (pte & PTE_PROT(TLB_DIRTY))
109 		attrs |= PG_PMAP_MOD;
110 	if ((pte & PTE_PROT(TLB_REFTRAP)) == 0)
111 		attrs |= PG_PMAP_REF;
112 	return attrs;
113 }
114 
115 struct vm_page	*pmap_pagealloc(struct uvm_object *obj, voff_t off);
116 void		 pmap_pte_flush(struct pmap *pmap, vaddr_t va, pt_entry_t pte);
117 #ifdef DDB
118 void		 pmap_dump_table(pa_space_t space, vaddr_t sva);
119 void		 pmap_dump_pv(paddr_t pa);
120 #endif
121 int		 pmap_check_alias(struct vm_page *pg, vaddr_t va,
122 		    pt_entry_t pte);
123 
124 #define	IS_IOPAGE(pa)	((pa) >= HPPA_IOBEGIN)
125 
126 static inline void
127 pmap_lock(struct pmap *pmap)
128 {
129 	if (pmap != pmap_kernel())
130 		mtx_enter(&pmap->pm_mtx);
131 }
132 
133 static inline void
134 pmap_unlock(struct pmap *pmap)
135 {
136 	if (pmap != pmap_kernel())
137 		mtx_leave(&pmap->pm_mtx);
138 }
139 
140 struct vm_page *
141 pmap_pagealloc(struct uvm_object *obj, voff_t off)
142 {
143 	struct vm_page *pg;
144 
145 	if ((pg = uvm_pagealloc(obj, off, NULL,
146 	    UVM_PGA_USERESERVE | UVM_PGA_ZERO)) == NULL)
147 		printf("pmap_pagealloc fail\n");
148 
149 	return (pg);
150 }
151 
152 #ifdef USE_HPT
153 /*
154  * This hash function is the one used by the hardware TLB walker on the 7100LC.
155  */
156 static __inline struct vp_entry *
157 pmap_hash(struct pmap *pmap, vaddr_t va)
158 {
159 	return (struct vp_entry *)(pmap_hpt +
160 	    (((va >> 8) ^ (pmap->pm_space << 9)) & (pmap_hptsize - 1)));
161 }
162 
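/*
 * Build the tag the hardware walker compares against: bit 31 set marks
 * a valid tag (invalidated entries are set to 0xffff), the low 16 bits
 * carry the space id and bits 30..16 carry va<31..17>.
 */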
163 static __inline u_int32_t
164 pmap_vtag(struct pmap *pmap, vaddr_t va)
165 {
166 	return (0x80000000 | (pmap->pm_space & 0xffff) |
167 	    ((va >> 1) & 0x7fff0000));
168 }
169 #endif
170 
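/*
 * The CR_VTOP control register holds the space-to-page-directory table;
 * it is indexed by space id and stores the physical address of each
 * pmap's page directory.
 */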
171 static __inline void
172 pmap_sdir_set(pa_space_t space, volatile u_int32_t *pd)
173 {
174 	volatile u_int32_t *vtop;
175 
176 	mfctl(CR_VTOP, vtop);
177 #ifdef PMAPDEBUG
178 	if (!vtop)
179 		panic("pmap_sdir_set: zero vtop");
180 #endif
181 	vtop[space] = (u_int32_t)pd;
182 }
183 
184 static __inline u_int32_t *
185 pmap_sdir_get(pa_space_t space)
186 {
187 	u_int32_t *vtop;
188 
189 	mfctl(CR_VTOP, vtop);
190 	return ((u_int32_t *)vtop[space]);
191 }
192 
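/*
 * Two-level lookup: the top 10 va bits (va >> 22) index the page
 * directory, each entry of which points at the page table page mapping
 * that 4MB region.
 */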
193 static __inline volatile pt_entry_t *
194 pmap_pde_get(volatile u_int32_t *pd, vaddr_t va)
195 {
196 	return ((pt_entry_t *)pd[va >> 22]);
197 }
198 
199 static __inline void
200 pmap_pde_set(struct pmap *pm, vaddr_t va, paddr_t ptp)
201 {
202 #ifdef PMAPDEBUG
203 	if (ptp & PGOFSET)
204 		panic("pmap_pde_set, unaligned ptp 0x%lx", ptp);
205 #endif
206 	DPRINTF(PDB_FOLLOW|PDB_VP,
207 	    ("pmap_pde_set(%p, 0x%lx, 0x%lx)\n", pm, va, ptp));
208 
209 	pm->pm_pdir[va >> 22] = ptp;
210 }
211 
212 static __inline pt_entry_t *
213 pmap_pde_alloc(struct pmap *pm, vaddr_t va, struct vm_page **pdep)
214 {
215 	struct vm_page *pg;
216 	volatile pt_entry_t *pde;
217 	paddr_t pa;
218 
219 	DPRINTF(PDB_FOLLOW|PDB_VP,
220 	    ("pmap_pde_alloc(%p, 0x%lx, %p)\n", pm, va, pdep));
221 
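	/*
	 * Drop the pmap lock around the page allocation; another thread
	 * may install the pde meanwhile, so recheck below and free our
	 * page if we lost the race.
	 */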
222 	pmap_unlock(pm);
223 	pg = pmap_pagealloc(&pm->pm_obj, va);
224 	pmap_lock(pm);
225 	if (pg == NULL)
226 		return (NULL);
227 	pde = pmap_pde_get(pm->pm_pdir, va);
228 	if (pde) {
229 		pmap_unlock(pm);
230 		uvm_pagefree(pg);
231 		pmap_lock(pm);
232 		return (pt_entry_t *)pde;
233 	}
234 
235 	pa = VM_PAGE_TO_PHYS(pg);
236 
237 	DPRINTF(PDB_FOLLOW|PDB_VP, ("pmap_pde_alloc: pde %lx\n", pa));
238 
239 	atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
240 	pg->wire_count = 1;		/* no mappings yet */
241 	pmap_pde_set(pm, va, pa);
242 	pm->pm_stats.resident_count++;	/* count PTP as resident */
243 	pm->pm_ptphint = pg;
244 	if (pdep)
245 		*pdep = pg;
246 	return ((pt_entry_t *)pa);
247 }
248 
249 static __inline struct vm_page *
250 pmap_pde_ptp(struct pmap *pm, volatile pt_entry_t *pde)
251 {
252 	paddr_t pa = (paddr_t)pde;
253 
254 	DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_pde_ptp(%p, %p)\n", pm, pde));
255 
256 	if (pm->pm_ptphint && VM_PAGE_TO_PHYS(pm->pm_ptphint) == pa)
257 		return (pm->pm_ptphint);
258 
259 	DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_pde_ptp: lookup 0x%lx\n", pa));
260 
261 	return (PHYS_TO_VM_PAGE(pa));
262 }
263 
264 static __inline void
265 pmap_pde_release(struct pmap *pmap, vaddr_t va, struct vm_page *ptp)
266 {
267 	paddr_t pa;
268 
269 	DPRINTF(PDB_FOLLOW|PDB_PV,
270 	    ("pmap_pde_release(%p, 0x%lx, %p)\n", pmap, va, ptp));
271 
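	/*
	 * User page table pages are freed once only the initial
	 * "no mappings yet" reference remains; kernel ptps are never
	 * released.
	 */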
272 	if (pmap != pmap_kernel() && --ptp->wire_count <= 1) {
273 		DPRINTF(PDB_FOLLOW|PDB_PV,
274 		    ("pmap_pde_release: disposing ptp %p\n", ptp));
275 
276 		pmap_pde_set(pmap, va, 0);
277 		pmap->pm_stats.resident_count--;
278 		if (pmap->pm_ptphint == ptp) {
279 			pmap->pm_ptphint = RBT_ROOT(uvm_objtree,
280 			    &pmap->pm_obj.memt);
281 		}
282 		ptp->wire_count = 0;
283 #ifdef DIAGNOSTIC
284 		if (ptp->pg_flags & PG_BUSY)
285 			panic("pmap_pde_release: busy page table page");
286 #endif
287 		pa = VM_PAGE_TO_PHYS(ptp);
288 		pdcache(HPPA_SID_KERNEL, pa, PAGE_SIZE);
289 		pdtlb(HPPA_SID_KERNEL, pa);
290 		uvm_pagefree(ptp);
291 	}
292 }
293 
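/* Bits 21..12 of the va select one of the 1024 ptes in a page table page. */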
294 static __inline pt_entry_t
295 pmap_pte_get(volatile pt_entry_t *pde, vaddr_t va)
296 {
297 	return (pde[(va >> 12) & 0x3ff]);
298 }
299 
300 static __inline void
301 pmap_pte_set(volatile pt_entry_t *pde, vaddr_t va, pt_entry_t pte)
302 {
303 	DPRINTF(PDB_FOLLOW|PDB_VP, ("pmap_pte_set(%p, 0x%lx, 0x%x)\n",
304 	    pde, va, pte));
305 
306 #ifdef PMAPDEBUG
307 	if (!pde)
308 		panic("pmap_pte_set: zero pde");
309 
310 	if ((paddr_t)pde & PGOFSET)
311 		panic("pmap_pte_set, unaligned pde %p", pde);
312 #endif
313 
314 	pde[(va >> 12) & 0x3ff] = pte;
315 }
316 
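/*
 * Flush one mapping: write back and purge the data cache and TLB entry,
 * do the same for the instruction side if the mapping is executable, and
 * invalidate any matching HPT entry so a stale translation cannot be
 * reloaded from the hash table.
 */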
317 void
318 pmap_pte_flush(struct pmap *pmap, vaddr_t va, pt_entry_t pte)
319 {
320 	fdcache(pmap->pm_space, va, PAGE_SIZE);
321 	if (pte & PTE_PROT(TLB_EXECUTE)) {
322 		ficache(pmap->pm_space, va, PAGE_SIZE);
323 		pdtlb(pmap->pm_space, va);
324 		pitlb(pmap->pm_space, va);
325 	} else
326 		pdtlb(pmap->pm_space, va);
327 #ifdef USE_HPT
328 	if (pmap_hpt) {
329 		struct vp_entry *hpt;
330 		hpt = pmap_hash(pmap, va);
331 		if (hpt->vp_tag == pmap_vtag(pmap, va))
332 			hpt->vp_tag = 0xffff;
333 	}
334 #endif
335 }
336 
337 static __inline pt_entry_t
338 pmap_vp_find(struct pmap *pm, vaddr_t va)
339 {
340 	volatile pt_entry_t *pde;
341 
342 	if (!(pde = pmap_pde_get(pm->pm_pdir, va)))
343 		return (0);
344 
345 	return (pmap_pte_get(pde, va));
346 }
347 
348 #ifdef DDB
349 void
350 pmap_dump_table(pa_space_t space, vaddr_t sva)
351 {
352 	pa_space_t sp;
353 
354 	for (sp = 0; sp <= hppa_sid_max; sp++) {
355 		volatile pt_entry_t *pde;
356 		pt_entry_t pte;
357 		vaddr_t va, pdemask;
358 		u_int32_t *pd;
359 
360 		if (((int)space >= 0 && sp != space) ||
361 		    !(pd = pmap_sdir_get(sp)))
362 			continue;
363 
364 		for (pdemask = 1, va = sva ? sva : 0;
365 		    va < 0xfffff000; va += PAGE_SIZE) {
366 			if (pdemask != (va & PDE_MASK)) {
367 				pdemask = va & PDE_MASK;
368 				if (!(pde = pmap_pde_get(pd, va))) {
369 					va = pdemask + (~PDE_MASK + 1);
370 					va -= PAGE_SIZE;
371 					continue;
372 				}
373 				printf("%x:%8p:\n", sp, pde);
374 			}
375 
376 			if (!(pte = pmap_pte_get(pde, va)))
377 				continue;
378 
379 			printf("0x%08lx-0x%08x:%b\n", va, pte & ~PAGE_MASK,
380 			    TLB_PROT(pte & PAGE_MASK), TLB_BITS);
381 		}
382 	}
383 }
384 
385 void
386 pmap_dump_pv(paddr_t pa)
387 {
388 	struct vm_page *pg;
389 	struct pv_entry *pve;
390 
391 	pg = PHYS_TO_VM_PAGE(pa);
392 	if (pg != NULL) {
393 		for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next)
394 			printf("%x:%lx\n", pve->pv_pmap->pm_space, pve->pv_va);
395 	}
396 }
397 #endif
398 
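/*
 * The caches are virtually indexed, so writable mappings of the same
 * physical page at va's that differ in the cache alias bits (HPPA_PGAOFF)
 * are dangerous.  Return the number of such conflicting mappings.
 */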
399 int
400 pmap_check_alias(struct vm_page *pg, vaddr_t va, pt_entry_t pte)
401 {
402 	struct pv_entry *pve;
403 	int ret = 0;
404 
405 	/* check for non-equ aliased mappings */
406 	mtx_enter(&pg->mdpage.pvh_mtx);
407 	for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next) {
408 		pte |= pmap_vp_find(pve->pv_pmap, pve->pv_va);
409 		if ((va & HPPA_PGAOFF) != (pve->pv_va & HPPA_PGAOFF) &&
410 		    (pte & PTE_PROT(TLB_GATEWAY)) == 0 &&
411 		    (pte & PTE_PROT(TLB_WRITE))) {
412 #ifdef PMAPDEBUG
413 			printf("pmap_check_alias: "
414 			    "aliased writable mapping 0x%x:0x%lx\n",
415 			    pve->pv_pmap->pm_space, pve->pv_va);
416 #endif
417 			ret++;
418 		}
419 	}
420 	mtx_leave(&pg->mdpage.pvh_mtx);
421 
422 	return (ret);
423 }
424 
425 static __inline struct pv_entry *
426 pmap_pv_alloc(void)
427 {
428 	struct pv_entry *pv;
429 
430 	DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_pv_alloc()\n"));
431 
432 	pv = pool_get(&pmap_pv_pool, PR_NOWAIT);
433 
434 	DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_pv_alloc: %p\n", pv));
435 
436 	return (pv);
437 }
438 
439 static __inline void
440 pmap_pv_free(struct pv_entry *pv)
441 {
442 	if (pv->pv_ptp)
443 		pmap_pde_release(pv->pv_pmap, pv->pv_va, pv->pv_ptp);
444 
445 	pool_put(&pmap_pv_pool, pv);
446 }
447 
448 static __inline void
449 pmap_pv_enter(struct vm_page *pg, struct pv_entry *pve, struct pmap *pm,
450     vaddr_t va, struct vm_page *pdep)
451 {
452 	DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_pv_enter(%p, %p, %p, 0x%lx, %p)\n",
453 	    pg, pve, pm, va, pdep));
454 	pve->pv_pmap = pm;
455 	pve->pv_va = va;
456 	pve->pv_ptp = pdep;
457 	mtx_enter(&pg->mdpage.pvh_mtx);
458 	pve->pv_next = pg->mdpage.pvh_list;
459 	pg->mdpage.pvh_list = pve;
460 	mtx_leave(&pg->mdpage.pvh_mtx);
461 }
462 
463 static __inline struct pv_entry *
464 pmap_pv_remove(struct vm_page *pg, struct pmap *pmap, vaddr_t va)
465 {
466 	struct pv_entry **pve, *pv;
467 
468 	mtx_enter(&pg->mdpage.pvh_mtx);
469 	for (pv = *(pve = &pg->mdpage.pvh_list);
470 	    pv; pv = *(pve = &(*pve)->pv_next))
471 		if (pv->pv_pmap == pmap && pv->pv_va == va) {
472 			*pve = pv->pv_next;
473 			break;
474 		}
475 	mtx_leave(&pg->mdpage.pvh_mtx);
476 	return (pv);
477 }
478 
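/*
 * Bootstrap the kernel pmap: set up the kernel page directory and the
 * space-to-pdir table (CR_VTOP), optionally the HPT, block-map the
 * kernel text through the BTLB, preallocate pdes for physical memory,
 * I/O space and kernel virtual, and finally map all of physical memory
 * va == pa.
 */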
479 void
480 pmap_bootstrap(vaddr_t vstart)
481 {
482 	extern int resvphysmem, etext, __rodata_end, __data_start;
483 	extern u_int *ie_mem;
484 	extern paddr_t hppa_vtop;
485 	vaddr_t va, addr = round_page(vstart), eaddr;
486 	vsize_t size;
487 	struct pmap *kpm;
488 	int npdes, nkpdes;
489 
490 	DPRINTF(PDB_FOLLOW|PDB_INIT, ("pmap_bootstrap(0x%lx)\n", vstart));
491 
492 	uvm_setpagesize();
493 
494 	hppa_prot[PROT_NONE]  = TLB_AR_NA;
495 	hppa_prot[PROT_READ]  = TLB_AR_R;
496 	hppa_prot[PROT_WRITE] = TLB_AR_RW;
497 	hppa_prot[PROT_READ | PROT_WRITE] = TLB_AR_RW;
498 	hppa_prot[PROT_EXEC]  = TLB_AR_X;
499 	hppa_prot[PROT_READ | PROT_EXEC] = TLB_AR_RX;
500 	hppa_prot[PROT_WRITE | PROT_EXEC] = TLB_AR_RWX;
501 	hppa_prot[PROT_READ | PROT_WRITE | PROT_EXEC] = TLB_AR_RWX;
502 
503 	/*
504 	 * Initialize kernel pmap
505 	 */
506 	kpm = &kernel_pmap_store;
507 	bzero(kpm, sizeof(*kpm));
508 	uvm_obj_init(&kpm->pm_obj, &pmap_pager, 1);
509 	kpm->pm_space = HPPA_SID_KERNEL;
510 	kpm->pm_pid = HPPA_PID_KERNEL;
511 	kpm->pm_pdir_pg = NULL;
512 	kpm->pm_pdir = (u_int32_t *)addr;
513 	bzero((void *)addr, PAGE_SIZE);
514 	fdcache(HPPA_SID_KERNEL, addr, PAGE_SIZE);
515 	addr += PAGE_SIZE;
516 
517 	/*
518 	 * Allocate various tables and structures.
519 	 */
520 
521 	mtctl(addr, CR_VTOP);
522 	hppa_vtop = addr;
523 	size = round_page((hppa_sid_max + 1) * 4);
524 	bzero((void *)addr, size);
525 	fdcache(HPPA_SID_KERNEL, addr, size);
526 	DPRINTF(PDB_INIT, ("vtop: 0x%lx @ 0x%lx\n", size, addr));
527 	addr += size;
528 	pmap_sdir_set(HPPA_SID_KERNEL, kpm->pm_pdir);
529 
530 	ie_mem = (u_int *)addr;
531 	addr += 0x8000;
532 
533 #ifdef USE_HPT
534 	if (pmap_hptsize) {
535 		struct vp_entry *hptp;
536 		int i, error;
537 
538 		/* must be aligned to the size XXX */
539 		if (addr & (pmap_hptsize - 1))
540 			addr += pmap_hptsize;
541 		addr &= ~(pmap_hptsize - 1);
542 
543 		bzero((void *)addr, pmap_hptsize);
544 		for (hptp = (struct vp_entry *)addr, i = pmap_hptsize / 16; i--;)
545 			hptp[i].vp_tag = 0xffff;
546 		pmap_hpt = addr;
547 		addr += pmap_hptsize;
548 
549 		DPRINTF(PDB_INIT, ("hpt_table: 0x%x @ %p\n",
550 		    pmap_hptsize, addr));
551 
552 		if ((error = (cpu_hpt_init)(pmap_hpt, pmap_hptsize)) < 0) {
553 			printf("WARNING: HPT init error %d -- DISABLED\n",
554 			    error);
555 			pmap_hpt = 0;
556 		} else
557 			DPRINTF(PDB_INIT,
558 			    ("HPT: installed for %d entries @ 0x%x\n",
559 			    pmap_hptsize / sizeof(struct vp_entry), addr));
560 	}
561 #endif
562 
563 	/* XXX PCXS needs this inserted into an IBTLB */
564 	/*	and can block-map the whole phys w/ another */
565 
566 	/*
567 	 * We use separate mappings for the first 4MB of kernel text
568 	 * and whatever is left, so that the mapping does not cover kernel
569 	 * data.
570 	 */
571 	for (va = 0; va < (vaddr_t)&etext; va += size) {
572 		size = (vaddr_t)&etext - va;
573 		if (size > 4 * 1024 * 1024)
574 			size = 4 * 1024 * 1024;
575 
576 		if (btlb_insert(HPPA_SID_KERNEL, va, va, &size,
577 		    pmap_sid2pid(HPPA_SID_KERNEL) |
578 		    pmap_prot(pmap_kernel(), PROT_READ | PROT_EXEC)) < 0) {
579 			printf("WARNING: cannot block map kernel text\n");
580 			break;
581 		}
582 	}
583 
584 	if (&__rodata_end < &__data_start) {
585 		physical_steal = (vaddr_t)&__rodata_end;
586 		physical_end = (vaddr_t)&__data_start;
587 		DPRINTF(PDB_INIT, ("physpool: 0x%lx @ 0x%lx\n",
588 		    physical_end - physical_steal, physical_steal));
589 	}
590 
591 	/* kernel virtual is the last gig of the moohicans */
592 	nkpdes = physmem >> 14;	/* at least 16/gig for kmem */
593 	if (nkpdes < 4)
594 		nkpdes = 4;		/* ... but no less than four */
595 	nkpdes += HPPA_IOLEN / PDE_SIZE; /* ... and io space too */
596 	npdes = nkpdes + (physmem + atop(PDE_SIZE) - 1) / atop(PDE_SIZE);
597 
598 	/* map the pdes */
599 	for (va = 0; npdes--; va += PDE_SIZE, addr += PAGE_SIZE) {
600 
601 		/* last nkpdes are for the kernel virtual */
602 		if (npdes == nkpdes - 1)
603 			va = SYSCALLGATE;
604 		if (npdes == HPPA_IOLEN / PDE_SIZE - 1)
605 			va = HPPA_IOBEGIN;
606 		/* now map the pde for the physmem */
607 		bzero((void *)addr, PAGE_SIZE);
608 		fdcache(HPPA_SID_KERNEL, addr, PAGE_SIZE);
609 		DPRINTF(PDB_INIT|PDB_VP,
610 		    ("pde premap 0x%lx 0x%lx\n", va, addr));
611 		pmap_pde_set(kpm, va, addr);
612 		kpm->pm_stats.resident_count++; /* count PTP as resident */
613 	}
614 
615 	resvphysmem = atop(addr);
616 	eaddr = physmem - atop(round_page(MSGBUFSIZE));
617 	DPRINTF(PDB_INIT, ("physmem: 0x%x - 0x%lx\n", resvphysmem, eaddr));
618 	uvm_page_physload(0, eaddr, resvphysmem, eaddr, 0);
619 
620 	/* TODO optimize/inline the kenter */
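	/*
	 * Map all of physical memory va == pa: executable for kernel text,
	 * read-only for rodata, inaccessible for the page right after
	 * proc0's u-area, read/write for the rest.
	 */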
621 	for (va = 0; va < ptoa(physmem); va += PAGE_SIZE) {
622 		extern struct user *proc0paddr;
623 		vm_prot_t prot = PROT_READ | PROT_WRITE;
624 
625 		if (va < (vaddr_t)&etext)
626 			prot = PROT_READ | PROT_EXEC;
627 		else if (va < (vaddr_t)&__rodata_end)
628 			prot = PROT_READ;
629 		else if (va == (vaddr_t)proc0paddr + USPACE)
630 			prot = PROT_NONE;
631 
632 		pmap_kenter_pa(va, va, prot);
633 	}
634 
635 	DPRINTF(PDB_INIT, ("bootstrap: mapped %p - 0x%lx\n", &etext, va));
636 }
637 
638 void
639 pmap_init(void)
640 {
641 	DPRINTF(PDB_FOLLOW|PDB_INIT, ("pmap_init()\n"));
642 
643 	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, IPL_NONE, 0,
644 	    "pmappl", NULL);
645 	pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, IPL_VM, 0,
646 	    "pmappv", NULL);
647 	pool_setlowat(&pmap_pv_pool, pmap_pvlowat);
648 	pool_sethiwat(&pmap_pv_pool, pmap_pvlowat * 32);
649 
650 	pmap_initialized = 1;
651 
652 	/*
653 	 * map the SysCall gateway page once for everybody
654 	 * NB: we'll have to remap the phys memory
655 	 *     if we have any at SYSCALLGATE address (;
656 	 */
657 	{
658 		volatile pt_entry_t *pde;
659 
660 		if (!(pde = pmap_pde_get(pmap_kernel()->pm_pdir, SYSCALLGATE)) &&
661 		    !(pde = pmap_pde_alloc(pmap_kernel(), SYSCALLGATE, NULL)))
662 			panic("pmap_init: cannot allocate pde");
663 
664 		pmap_pte_set(pde, SYSCALLGATE, (paddr_t)&gateway_page |
665 		    PTE_PROT(TLB_GATE_PROT));
666 	}
667 
668 	DPRINTF(PDB_FOLLOW|PDB_INIT, ("pmap_init(): done\n"));
669 }
670 
671 void
672 pmap_virtual_space(vaddr_t *startp, vaddr_t *endp)
673 {
674 	*startp = SYSCALLGATE + PAGE_SIZE;
675 	*endp = VM_MAX_KERNEL_ADDRESS;
676 }
677 
678 struct pmap *
679 pmap_create(void)
680 {
681 	struct pmap *pmap;
682 	pa_space_t space;
683 
684 	DPRINTF(PDB_FOLLOW|PDB_PMAP, ("pmap_create()\n"));
685 
686 	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
687 
688 	mtx_init(&pmap->pm_mtx, IPL_VM);
689 
690 	uvm_obj_init(&pmap->pm_obj, &pmap_pager, 1);
691 
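	/* pick a random space id that is not already in use */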
692 	for (space = 1 + arc4random_uniform(hppa_sid_max);
693 	    pmap_sdir_get(space); space = (space + 1) % hppa_sid_max);
694 
695 	if ((pmap->pm_pdir_pg = pmap_pagealloc(NULL, 0)) == NULL)
696 		panic("pmap_create: no pages");
697 	pmap->pm_ptphint = NULL;
698 	pmap->pm_pdir = (u_int32_t *)VM_PAGE_TO_PHYS(pmap->pm_pdir_pg);
699 	pmap_sdir_set(space, pmap->pm_pdir);
700 
701 	pmap->pm_space = space;
702 	pmap->pm_pid = (space + 1) << 1;
703 
704 	pmap->pm_stats.resident_count = 1;
705 	pmap->pm_stats.wired_count = 0;
706 
707 	return (pmap);
708 }
709 
710 void
711 pmap_destroy(struct pmap *pmap)
712 {
713 	paddr_t pa;
714 	int refs;
715 
716 	DPRINTF(PDB_FOLLOW|PDB_PMAP, ("pmap_destroy(%p)\n", pmap));
717 
718 	refs = atomic_dec_int_nv(&pmap->pm_obj.uo_refs);
719 	if (refs > 0)
720 		return;
721 
722 	KASSERT(RBT_EMPTY(uvm_objtree, &pmap->pm_obj.memt));
723 
724 	pmap_sdir_set(pmap->pm_space, 0);
725 
726 	pa = VM_PAGE_TO_PHYS(pmap->pm_pdir_pg);
727 	pdcache(HPPA_SID_KERNEL, pa, PAGE_SIZE);
728 	pdtlb(HPPA_SID_KERNEL, pa);
729 	uvm_pagefree(pmap->pm_pdir_pg);
730 
731 	pmap->pm_pdir_pg = NULL;
732 	pool_put(&pmap_pmap_pool, pmap);
733 }
734 
735 /*
736  * Add a reference to the specified pmap.
737  */
738 void
739 pmap_reference(struct pmap *pmap)
740 {
741 	DPRINTF(PDB_FOLLOW|PDB_PMAP, ("pmap_reference(%p)\n", pmap));
742 
743 	atomic_inc_int(&pmap->pm_obj.uo_refs);
744 }
745 
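/*
 * Insert a va -> pa mapping into the given pmap: allocate a page table
 * page if needed, keep the wired/resident counts and the page's pv list
 * up to date, and preserve the referenced/modified bits of any mapping
 * being replaced.
 */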
746 int
747 pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
748 {
749 	volatile pt_entry_t *pde;
750 	pt_entry_t pte;
751 	struct vm_page *pg, *ptp = NULL;
752 	struct pv_entry *pve = NULL;
753 	boolean_t wired = (flags & PMAP_WIRED) != 0;
754 
755 	DPRINTF(PDB_FOLLOW|PDB_ENTER,
756 	    ("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n",
757 	    pmap, va, pa, prot, flags));
758 	pmap_lock(pmap);
759 
760 	if (!(pde = pmap_pde_get(pmap->pm_pdir, va)) &&
761 	    !(pde = pmap_pde_alloc(pmap, va, &ptp))) {
762 		if (flags & PMAP_CANFAIL) {
763 			pmap_unlock(pmap);
764 			return (ENOMEM);
765 		}
766 		panic("pmap_enter: cannot allocate pde");
767 	}
768 
769 	if (!ptp)
770 		ptp = pmap_pde_ptp(pmap, pde);
771 
772 	if ((pte = pmap_pte_get(pde, va))) {
773 		DPRINTF(PDB_ENTER,
774 		    ("pmap_enter: remapping 0x%x -> 0x%lx\n", pte, pa));
775 
776 		pmap_pte_flush(pmap, va, pte);
777 		if (wired && !(pte & PTE_PROT(TLB_WIRED)))
778 			pmap->pm_stats.wired_count++;
779 		else if (!wired && (pte & PTE_PROT(TLB_WIRED)))
780 			pmap->pm_stats.wired_count--;
781 
782 		if (PTE_PAGE(pte) == pa) {
783 			DPRINTF(PDB_FOLLOW|PDB_ENTER,
784 			    ("pmap_enter: same page\n"));
785 			goto enter;
786 		}
787 
788 		pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
789 		if (pg != NULL) {
790 			pve = pmap_pv_remove(pg, pmap, va);
791 			atomic_setbits_int(&pg->pg_flags, pmap_pvh_attrs(pte));
792 		}
793 	} else {
794 		DPRINTF(PDB_ENTER,
795 		    ("pmap_enter: new mapping 0x%lx -> 0x%lx\n", va, pa));
796 		pte = PTE_PROT(TLB_REFTRAP);
797 		pmap->pm_stats.resident_count++;
798 		if (wired)
799 			pmap->pm_stats.wired_count++;
800 		if (ptp)
801 			ptp->wire_count++;
802 	}
803 
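	/*
	 * For managed pages record the mapping in the page's pv list; if
	 * that would create a non-equivalent writable alias, all existing
	 * mappings of the page are torn down first.
	 */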
804 	if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(PTE_PAGE(pa)))) {
805 		if (!pve && !(pve = pmap_pv_alloc())) {
806 			if (flags & PMAP_CANFAIL) {
807 				pmap_unlock(pmap);
808 				return (ENOMEM);
809 			}
810 			panic("pmap_enter: no pv entries available");
811 		}
812 		pte |= PTE_PROT(pmap_prot(pmap, prot));
813 		if (pmap_check_alias(pg, va, pte))
814 			pmap_page_remove(pg);
815 		pmap_pv_enter(pg, pve, pmap, va, ptp);
816 	} else if (pve)
817 		pmap_pv_free(pve);
818 
819 enter:
820 	/* preserve old ref & mod */
821 	pte = pa | PTE_PROT(pmap_prot(pmap, prot)) |
822 	    (pte & PTE_PROT(TLB_UNCACHABLE|TLB_DIRTY|TLB_REFTRAP));
823 	if (IS_IOPAGE(pa))
824 		pte |= PTE_PROT(TLB_UNCACHABLE);
825 	if (wired)
826 		pte |= PTE_PROT(TLB_WIRED);
827 	pmap_pte_set(pde, va, pte);
828 
829 	DPRINTF(PDB_FOLLOW|PDB_ENTER, ("pmap_enter: leaving\n"));
830 	pmap_unlock(pmap);
831 
832 	return (0);
833 }
834 
835 void
836 pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva)
837 {
838 	struct pv_entry *pve;
839 	volatile pt_entry_t *pde;
840 	pt_entry_t pte;
841 	struct vm_page *pg, *ptp;
842 	vaddr_t pdemask;
843 	int batch;
844 
845 	DPRINTF(PDB_FOLLOW|PDB_REMOVE,
846 	    ("pmap_remove(%p, 0x%lx, 0x%lx)\n", pmap, sva, eva));
847 	pmap_lock(pmap);
848 
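	/*
	 * "batch" is set while an entire 4MB pde range lies inside
	 * [sva, eva); individual ptes are then left in place since the
	 * page table page covering them is expected to be dropped as its
	 * mappings go away.
	 */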
849 	for (batch = 0; sva < eva; sva += PAGE_SIZE) {
850 		pdemask = sva & PDE_MASK;
851 		if (!(pde = pmap_pde_get(pmap->pm_pdir, sva))) {
852 			sva = pdemask + (~PDE_MASK + 1) - PAGE_SIZE;
853 			continue;
854 		}
855 		if (pdemask == sva) {
856 			if (sva + (~PDE_MASK + 1) <= eva)
857 				batch = 1;
858 			else
859 				batch = 0;
860 		}
861 
862 		if ((pte = pmap_pte_get(pde, sva))) {
863 
864 			/* TODO measure here the speed tradeoff
865 			 * for flushing whole 4M vs per-page
866 			 * in case of non-complete pde fill
867 			 */
868 			pmap_pte_flush(pmap, sva, pte);
869 			if (pte & PTE_PROT(TLB_WIRED))
870 				pmap->pm_stats.wired_count--;
871 			pmap->pm_stats.resident_count--;
872 
873 			/* iff properly accounted, the pde will be dropped anyway */
874 			if (!batch)
875 				pmap_pte_set(pde, sva, 0);
876 
877 			if (pmap_initialized &&
878 			    (pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)))) {
879 				atomic_setbits_int(&pg->pg_flags,
880 				    pmap_pvh_attrs(pte));
881 				if ((pve = pmap_pv_remove(pg, pmap, sva)))
882 					pmap_pv_free(pve);
883 			} else {
884 				if (IS_IOPAGE(PTE_PAGE(pte))) {
885 					ptp = pmap_pde_ptp(pmap, pde);
886 					if (ptp != NULL)
887 						pmap_pde_release(pmap, sva, ptp);
888 				}
889 			}
890 		}
891 	}
892 
893 	DPRINTF(PDB_FOLLOW|PDB_REMOVE, ("pmap_remove: leaving\n"));
894 	pmap_unlock(pmap);
895 }
896 
897 void
898 pmap_page_write_protect(struct vm_page *pg)
899 {
900 	struct pv_entry *pve;
901 	int attrs;
902 
903 	DPRINTF(PDB_FOLLOW|PDB_BITS, ("pmap_page_write_protect(%p)\n", pg));
904 
905 	attrs = 0;
906 	mtx_enter(&pg->mdpage.pvh_mtx);
907 	for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next) {
908 		struct pmap *pmap = pve->pv_pmap;
909 		vaddr_t va = pve->pv_va;
910 		volatile pt_entry_t *pde;
911 		pt_entry_t opte, pte;
912 
913 		if ((pde = pmap_pde_get(pmap->pm_pdir, va))) {
914 			opte = pte = pmap_pte_get(pde, va);
915 			if (pte & TLB_GATEWAY)
916 				continue;
917 			pte &= ~TLB_WRITE;
918 			attrs |= pmap_pvh_attrs(pte);
919 
920 			if (opte != pte) {
921 				pmap_pte_flush(pmap, va, opte);
922 				pmap_pte_set(pde, va, pte);
923 			}
924 		}
925 	}
926 	mtx_leave(&pg->mdpage.pvh_mtx);
927 	if (attrs != (PG_PMAP_REF | PG_PMAP_MOD))
928 		atomic_clearbits_int(&pg->pg_flags,
929 		    attrs ^(PG_PMAP_REF | PG_PMAP_MOD));
930 	if (attrs != 0)
931 		atomic_setbits_int(&pg->pg_flags, attrs);
932 }
933 
934 void
935 pmap_write_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
936 {
937 	struct vm_page *pg;
938 	volatile pt_entry_t *pde;
939 	pt_entry_t pte;
940 	u_int tlbprot, pdemask;
941 
942 	DPRINTF(PDB_FOLLOW|PDB_PMAP,
943 	    ("pmap_write_protect(%p, %lx, %lx, %x)\n", pmap, sva, eva, prot));
944 	pmap_lock(pmap);
945 
946 	sva = trunc_page(sva);
947 	tlbprot = PTE_PROT(pmap_prot(pmap, prot));
948 
949 	for (pdemask = 1; sva < eva; sva += PAGE_SIZE) {
950 		if (pdemask != (sva & PDE_MASK)) {
951 			pdemask = sva & PDE_MASK;
952 			if (!(pde = pmap_pde_get(pmap->pm_pdir, sva))) {
953 				sva = pdemask + (~PDE_MASK + 1) - PAGE_SIZE;
954 				continue;
955 			}
956 		}
957 		if ((pte = pmap_pte_get(pde, sva))) {
958 
959 			DPRINTF(PDB_PMAP,
960 			    ("pmap_write_protect: va=0x%lx pte=0x%x\n",
961 			    sva,  pte));
962 			/*
963 			 * Determine if mapping is changing.
964 			 * If not, nothing to do.
965 			 */
966 			if ((pte & PTE_PROT(TLB_AR_MASK)) == tlbprot)
967 				continue;
968 
969 			pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
970 			if (pg != NULL) {
971 				atomic_setbits_int(&pg->pg_flags,
972 				    pmap_pvh_attrs(pte));
973 			}
974 
975 			pmap_pte_flush(pmap, sva, pte);
976 			pte &= ~PTE_PROT(TLB_AR_MASK);
977 			pte |= tlbprot;
978 			pmap_pte_set(pde, sva, pte);
979 		}
980 	}
981 
982 	pmap_unlock(pmap);
983 }
984 
985 void
986 pmap_page_remove(struct vm_page *pg)
987 {
988 	struct pv_entry *pve;
989 
990 	DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_page_remove(%p)\n", pg));
991 
992 	if (pg->mdpage.pvh_list == NULL)
993 		return;
994 
995 	mtx_enter(&pg->mdpage.pvh_mtx);
996 	while ((pve = pg->mdpage.pvh_list)) {
997 		struct pmap *pmap = pve->pv_pmap;
998 		vaddr_t va = pve->pv_va;
999 		volatile pt_entry_t *pde;
1000 		pt_entry_t pte;
1001 		u_int attrs;
1002 
1003 		pg->mdpage.pvh_list = pve->pv_next;
1004 		pmap_reference(pmap);
1005 		mtx_leave(&pg->mdpage.pvh_mtx);
1006 
1007 		pmap_lock(pmap);
1008 		pde = pmap_pde_get(pmap->pm_pdir, va);
1009 		pte = pmap_pte_get(pde, va);
1010 		attrs = pmap_pvh_attrs(pte);
1011 
1012 		pmap_pte_flush(pmap, va, pte);
1013 		if (pte & PTE_PROT(TLB_WIRED))
1014 			pmap->pm_stats.wired_count--;
1015 		pmap->pm_stats.resident_count--;
1016 
1017 		pmap_pte_set(pde, va, 0);
1018 		pmap_unlock(pmap);
1019 
1020 		pmap_destroy(pmap);
1021 		pmap_pv_free(pve);
1022 		atomic_setbits_int(&pg->pg_flags, attrs);
1023 		mtx_enter(&pg->mdpage.pvh_mtx);
1024 	}
1025 	mtx_leave(&pg->mdpage.pvh_mtx);
1026 
1027 	DPRINTF(PDB_FOLLOW|PDB_PV, ("pmap_page_remove: leaving\n"));
1028 }
1029 
1030 void
1031 pmap_unwire(struct pmap *pmap, vaddr_t va)
1032 {
1033 	volatile pt_entry_t *pde;
1034 	pt_entry_t pte = 0;
1035 
1036 	DPRINTF(PDB_FOLLOW|PDB_PMAP, ("pmap_unwire(%p, 0x%lx)\n", pmap, va));
1037 	pmap_lock(pmap);
1038 
1039 	if ((pde = pmap_pde_get(pmap->pm_pdir, va))) {
1040 		pte = pmap_pte_get(pde, va);
1041 
1042 		if (pte & PTE_PROT(TLB_WIRED)) {
1043 			pte &= ~PTE_PROT(TLB_WIRED);
1044 			pmap->pm_stats.wired_count--;
1045 			pmap_pte_set(pde, va, pte);
1046 		}
1047 	}
1048 
1049 	DPRINTF(PDB_FOLLOW|PDB_PMAP, ("pmap_unwire: leaving\n"));
1050 	pmap_unlock(pmap);
1051 
1052 #ifdef DIAGNOSTIC
1053 	if (!pte)
1054 		panic("pmap_unwire: invalid va 0x%lx", va);
1055 #endif
1056 }
1057 
1058 boolean_t
1059 pmap_changebit(struct vm_page *pg, u_int set, u_int clear)
1060 {
1061 	struct pv_entry *pve;
1062 	pt_entry_t res;
1063 	int attrs;
1064 
1065 	DPRINTF(PDB_FOLLOW|PDB_BITS,
1066 	    ("pmap_changebit(%p, %x, %x)\n", pg, set, clear));
1067 
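	/*
	 * "res" collects the ref/mod state found in the old ptes, "attrs"
	 * the state remaining after the change; the page flags are then
	 * adjusted to match "attrs".
	 */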
1068 	res = 0;
1069 	attrs = 0;
1070 	mtx_enter(&pg->mdpage.pvh_mtx);
1071 	for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next) {
1072 		struct pmap *pmap = pve->pv_pmap;
1073 		vaddr_t va = pve->pv_va;
1074 		volatile pt_entry_t *pde;
1075 		pt_entry_t opte, pte;
1076 
1077 		if ((pde = pmap_pde_get(pmap->pm_pdir, va))) {
1078 			opte = pte = pmap_pte_get(pde, va);
1079 #ifdef PMAPDEBUG
1080 			if (!pte) {
1081 				printf("pmap_changebit: zero pte for 0x%lx\n",
1082 				    va);
1083 				continue;
1084 			}
1085 #endif
1086 			pte &= ~clear;
1087 			pte |= set;
1088 			attrs |= pmap_pvh_attrs(pte);
1089 			res |= pmap_pvh_attrs(opte);
1090 
1091 			if (opte != pte) {
1092 				pmap_pte_flush(pmap, va, opte);
1093 				pmap_pte_set(pde, va, pte);
1094 			}
1095 		}
1096 	}
1097 	mtx_leave(&pg->mdpage.pvh_mtx);
1098 	if (attrs != (PG_PMAP_REF | PG_PMAP_MOD))
1099 		atomic_clearbits_int(&pg->pg_flags,
1100 		    attrs ^(PG_PMAP_REF | PG_PMAP_MOD));
1101 	if (attrs != 0)
1102 		atomic_setbits_int(&pg->pg_flags, attrs);
1103 
1104 	return ((res & (clear | set)) != 0);
1105 }
1106 
1107 boolean_t
1108 pmap_testbit(struct vm_page *pg, int bit)
1109 {
1110 	struct pv_entry *pve;
1111 	pt_entry_t pte;
1112 	boolean_t ret;
1113 
1114 	DPRINTF(PDB_FOLLOW|PDB_BITS, ("pmap_testbit(%p, %x)\n", pg, bit));
1115 
1116 	mtx_enter(&pg->mdpage.pvh_mtx);
1117 	for (pve = pg->mdpage.pvh_list; !(pg->pg_flags & bit) && pve;
1118 	    pve = pve->pv_next) {
1119 		pte = pmap_vp_find(pve->pv_pmap, pve->pv_va);
1120 		atomic_setbits_int(&pg->pg_flags, pmap_pvh_attrs(pte));
1121 	}
1122 	mtx_leave(&pg->mdpage.pvh_mtx);
1123 	ret = ((pg->pg_flags & bit) != 0);
1124 
1125 	return ret;
1126 }
1127 
1128 boolean_t
1129 pmap_extract(struct pmap *pmap, vaddr_t va, paddr_t *pap)
1130 {
1131 	pt_entry_t pte;
1132 
1133 	DPRINTF(PDB_FOLLOW|PDB_EXTRACT, ("pmap_extract(%p, %lx)\n", pmap, va));
1134 
1135 	pmap_lock(pmap);
1136 	pte = pmap_vp_find(pmap, va);
1137 	pmap_unlock(pmap);
1138 
1139 	if (pte) {
1140 		if (pap)
1141 			*pap = (pte & ~PGOFSET) | (va & PGOFSET);
1142 		return (TRUE);
1143 	}
1144 
1145 	return (FALSE);
1146 }
1147 
1148 void
1149 pmap_activate(struct proc *p)
1150 {
1151 	struct pmap *pmap = p->p_vmspace->vm_map.pmap;
1152 	struct pcb *pcb = &p->p_addr->u_pcb;
1153 
1154 	pcb->pcb_space = pmap->pm_space;
1155 }
1156 
1157 void
1158 pmap_deactivate(struct proc *p)
1159 {
1160 
1161 }
1162 
1163 static __inline void
1164 pmap_flush_page(struct vm_page *pg, int purge)
1165 {
1166 	struct pv_entry *pve;
1167 
1168 	/* purge cache for all possible mappings for the pa */
1169 	mtx_enter(&pg->mdpage.pvh_mtx);
1170 	for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next) {
1171 		if (purge)
1172 			pdcache(pve->pv_pmap->pm_space, pve->pv_va, PAGE_SIZE);
1173 		else
1174 			fdcache(pve->pv_pmap->pm_space, pve->pv_va, PAGE_SIZE);
1175 		ficache(pve->pv_pmap->pm_space, pve->pv_va, PAGE_SIZE);
1176 		pdtlb(pve->pv_pmap->pm_space, pve->pv_va);
1177 		pitlb(pve->pv_pmap->pm_space, pve->pv_va);
1178 	}
1179 	mtx_leave(&pg->mdpage.pvh_mtx);
1180 }
1181 
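/*
 * Physical memory is mapped va == pa at bootstrap, so pmap_zero_page()
 * and pmap_copy_page() operate on the physical address directly after
 * flushing any other mappings of the page to avoid stale cache aliases.
 */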
1182 void
1183 pmap_zero_page(struct vm_page *pg)
1184 {
1185 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
1186 
1187 	DPRINTF(PDB_FOLLOW|PDB_PHYS, ("pmap_zero_page(%lx)\n", pa));
1188 
1189 	pmap_flush_page(pg, 1);
1190 	bzero((void *)pa, PAGE_SIZE);
1191 	fdcache(HPPA_SID_KERNEL, pa, PAGE_SIZE);
1192 	pdtlb(HPPA_SID_KERNEL, pa);
1193 }
1194 
1195 void
1196 pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg)
1197 {
1198 	paddr_t spa = VM_PAGE_TO_PHYS(srcpg);
1199 	paddr_t dpa = VM_PAGE_TO_PHYS(dstpg);
1200 	DPRINTF(PDB_FOLLOW|PDB_PHYS, ("pmap_copy_page(%lx, %lx)\n", spa, dpa));
1201 
1202 	pmap_flush_page(srcpg, 0);
1203 	pmap_flush_page(dstpg, 1);
1204 	bcopy((void *)spa, (void *)dpa, PAGE_SIZE);
1205 	pdcache(HPPA_SID_KERNEL, spa, PAGE_SIZE);
1206 	fdcache(HPPA_SID_KERNEL, dpa, PAGE_SIZE);
1207 	pdtlb(HPPA_SID_KERNEL, spa);
1208 	pdtlb(HPPA_SID_KERNEL, dpa);
1209 }
1210 
1211 void
1212 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
1213 {
1214 	volatile pt_entry_t *pde;
1215 	pt_entry_t pte, opte;
1216 
1217 	DPRINTF(PDB_FOLLOW|PDB_ENTER,
1218 	    ("pmap_kenter_pa(%lx, %lx, %x)\n", va, pa, prot));
1219 
1220 	if (!(pde = pmap_pde_get(pmap_kernel()->pm_pdir, va)) &&
1221 	    !(pde = pmap_pde_alloc(pmap_kernel(), va, NULL)))
1222 		panic("pmap_kenter_pa: cannot allocate pde for va=0x%lx", va);
1223 	opte = pmap_pte_get(pde, va);
1224 	pte = pa | PTE_PROT(TLB_WIRED | TLB_REFTRAP |
1225 	    pmap_prot(pmap_kernel(), prot));
1226 	if (IS_IOPAGE(pa))
1227 		pte |= PTE_PROT(TLB_UNCACHABLE);
1228 	if (opte)
1229 		pmap_pte_flush(pmap_kernel(), va, opte);
1230 	pmap_pte_set(pde, va, pte);
1231 	pmap_kernel()->pm_stats.wired_count++;
1232 	pmap_kernel()->pm_stats.resident_count++;
1233 
1234 #ifdef PMAPDEBUG
1235 	{
1236 		struct vm_page *pg;
1237 
1238 		if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)))) {
1239 			if (pmap_check_alias(pg, va, pte))
1240 				db_enter();
1241 		}
1242 	}
1243 #endif
1244 	DPRINTF(PDB_FOLLOW|PDB_ENTER, ("pmap_kenter_pa: leaving\n"));
1245 }
1246 
1247 void
1248 pmap_kremove(vaddr_t va, vsize_t size)
1249 {
1250 	struct pv_entry *pve;
1251 	vaddr_t eva, pdemask;
1252 	volatile pt_entry_t *pde;
1253 	pt_entry_t pte;
1254 	struct vm_page *pg;
1255 
1256 	DPRINTF(PDB_FOLLOW|PDB_REMOVE,
1257 	    ("pmap_kremove(%lx, %lx)\n", va, size));
1258 #ifdef PMAPDEBUG
1259 	if (va < ptoa(physmem)) {
1260 		printf("pmap_kremove(%lx, %lx): unmapping physmem\n", va, size);
1261 		return;
1262 	}
1263 #endif
1264 
1265 	for (pdemask = 1, eva = va + size; va < eva; va += PAGE_SIZE) {
1266 		if (pdemask != (va & PDE_MASK)) {
1267 			pdemask = va & PDE_MASK;
1268 			if (!(pde = pmap_pde_get(pmap_kernel()->pm_pdir, va))) {
1269 				va = pdemask + (~PDE_MASK + 1) - PAGE_SIZE;
1270 				continue;
1271 			}
1272 		}
1273 		if (!(pte = pmap_pte_get(pde, va))) {
1274 #ifdef DEBUG
1275 			printf("pmap_kremove: unmapping unmapped 0x%lx\n", va);
1276 #endif
1277 			continue;
1278 		}
1279 
1280 		pmap_pte_flush(pmap_kernel(), va, pte);
1281 		pmap_pte_set(pde, va, 0);
1282 		if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)))) {
1283 			atomic_setbits_int(&pg->pg_flags, pmap_pvh_attrs(pte));
1284 			/* just in case we have enter/kenter mismatch */
1285 			if ((pve = pmap_pv_remove(pg, pmap_kernel(), va)))
1286 				pmap_pv_free(pve);
1287 		}
1288 	}
1289 
1290 	DPRINTF(PDB_FOLLOW|PDB_REMOVE, ("pmap_kremove: leaving\n"));
1291 }
1292 
1293 void
1294 pmap_proc_iflush(struct process *pr, vaddr_t va, vsize_t len)
1295 {
1296 	pmap_t pmap = vm_map_pmap(&pr->ps_vmspace->vm_map);
1297 
1298 	fdcache(pmap->pm_space, va, len);
1299 	sync_caches();
1300 	ficache(pmap->pm_space, va, len);
1301 	sync_caches();
1302 }
1303 
1304 struct vm_page *
1305 pmap_unmap_direct(vaddr_t va)
1306 {
1307 	fdcache(HPPA_SID_KERNEL, va, PAGE_SIZE);
1308 	pdtlb(HPPA_SID_KERNEL, va);
1309 	return (PHYS_TO_VM_PAGE(va));
1310 }
1311