xref: /freebsd/sys/powerpc/aim/mmu_oea64.c (revision 38069501)
1 /*-
2  * Copyright (c) 2008-2015 Nathan Whitehorn
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 /*
31  * Manages physical address maps.
32  *
33  * Since the information managed by this module is also stored by the
34  * logical address mapping module, this module may throw away valid virtual
35  * to physical mappings at almost any time.  However, invalidations of
36  * mappings must be done as requested.
37  *
38  * In order to cope with hardware architectures which make virtual to
39  * physical map invalidates expensive, this module may delay invalidate
40  * or reduced protection operations until such time as they are actually
41  * necessary.  This module is given full information as to which processors
42  * are currently using which maps, and to when physical maps must be made
43  * correct.
44  */
45 
46 #include "opt_compat.h"
47 #include "opt_kstack_pages.h"
48 
49 #include <sys/param.h>
50 #include <sys/kernel.h>
51 #include <sys/conf.h>
52 #include <sys/queue.h>
53 #include <sys/cpuset.h>
54 #include <sys/kerneldump.h>
55 #include <sys/ktr.h>
56 #include <sys/lock.h>
57 #include <sys/msgbuf.h>
58 #include <sys/malloc.h>
59 #include <sys/mutex.h>
60 #include <sys/proc.h>
61 #include <sys/rwlock.h>
62 #include <sys/sched.h>
63 #include <sys/sysctl.h>
64 #include <sys/systm.h>
65 #include <sys/vmmeter.h>
66 #include <sys/smp.h>
67 
68 #include <sys/kdb.h>
69 
70 #include <dev/ofw/openfirm.h>
71 
72 #include <vm/vm.h>
73 #include <vm/vm_param.h>
74 #include <vm/vm_kern.h>
75 #include <vm/vm_page.h>
76 #include <vm/vm_map.h>
77 #include <vm/vm_object.h>
78 #include <vm/vm_extern.h>
79 #include <vm/vm_pageout.h>
80 #include <vm/uma.h>
81 
82 #include <machine/_inttypes.h>
83 #include <machine/cpu.h>
84 #include <machine/platform.h>
85 #include <machine/frame.h>
86 #include <machine/md_var.h>
87 #include <machine/psl.h>
88 #include <machine/bat.h>
89 #include <machine/hid.h>
90 #include <machine/pte.h>
91 #include <machine/sr.h>
92 #include <machine/trap.h>
93 #include <machine/mmuvar.h>
94 
95 #include "mmu_oea64.h"
96 #include "mmu_if.h"
97 #include "moea64_if.h"
98 
99 void moea64_release_vsid(uint64_t vsid);
100 uintptr_t moea64_get_unique_vsid(void);
101 
102 #define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
103 #define ENABLE_TRANS(msr)	mtmsr(msr)
104 
105 #define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
106 #define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
107 #define	VSID_HASH_MASK		0x0000007fffffffffULL
108 
109 /*
110  * Locking semantics:
111  *
112  * There are two locks of interest: the page locks and the pmap locks, which
113  * protect their individual PVO lists and are locked in that order. The contents
114  * of all PVO entries are protected by the locks of their respective pmaps.
115  * The pmap of any PVO is guaranteed not to change so long as the PVO is linked
116  * into any list.
117  *
118  */
119 
120 #define PV_LOCK_COUNT	(PA_LOCK_COUNT*3)
121 static struct mtx_padalign pv_lock[PV_LOCK_COUNT];
122 
123 #define PV_LOCKPTR(pa)	((struct mtx *)(&pv_lock[pa_index(pa) % PV_LOCK_COUNT]))
124 #define PV_LOCK(pa)		mtx_lock(PV_LOCKPTR(pa))
125 #define PV_UNLOCK(pa)		mtx_unlock(PV_LOCKPTR(pa))
126 #define PV_LOCKASSERT(pa) 	mtx_assert(PV_LOCKPTR(pa), MA_OWNED)
127 #define PV_PAGE_LOCK(m)		PV_LOCK(VM_PAGE_TO_PHYS(m))
128 #define PV_PAGE_UNLOCK(m)	PV_UNLOCK(VM_PAGE_TO_PHYS(m))
129 #define PV_PAGE_LOCKASSERT(m)	PV_LOCKASSERT(VM_PAGE_TO_PHYS(m))
130 
131 struct ofw_map {
132 	cell_t	om_va;
133 	cell_t	om_len;
134 	uint64_t om_pa;
135 	cell_t	om_mode;
136 };
137 
138 extern unsigned char _etext[];
139 extern unsigned char _end[];
140 
141 /*
142  * Map of physical memory regions.
143  */
144 static struct	mem_region *regions;
145 static struct	mem_region *pregions;
146 static u_int	phys_avail_count;
147 static int	regions_sz, pregions_sz;
148 
149 extern void bs_remap_earlyboot(void);
150 
151 /*
152  * Lock for the SLB tables.
153  */
154 struct mtx	moea64_slb_mutex;
155 
156 /*
157  * PTEG data.
158  */
159 u_int		moea64_pteg_count;
160 u_int		moea64_pteg_mask;
161 
162 /*
163  * PVO data.
164  */
165 
166 uma_zone_t	moea64_pvo_zone; /* zone for pvo entries */
167 
168 static struct	pvo_entry *moea64_bpvo_pool;
169 static int	moea64_bpvo_pool_index = 0;
170 static int	moea64_bpvo_pool_size = 327680;
171 TUNABLE_INT("machdep.moea64_bpvo_pool_size", &moea64_bpvo_pool_size);
172 SYSCTL_INT(_machdep, OID_AUTO, moea64_allocated_bpvo_entries, CTLFLAG_RD,
173     &moea64_bpvo_pool_index, 0, "");
174 
175 #define	VSID_NBPW	(sizeof(u_int32_t) * 8)
176 #ifdef __powerpc64__
177 #define	NVSIDS		(NPMAPS * 16)
178 #define VSID_HASHMASK	0xffffffffUL
179 #else
180 #define NVSIDS		NPMAPS
181 #define VSID_HASHMASK	0xfffffUL
182 #endif
183 static u_int	moea64_vsid_bitmap[NVSIDS / VSID_NBPW];
184 
185 static boolean_t moea64_initialized = FALSE;
186 
187 /*
188  * Statistics.
189  */
190 u_int	moea64_pte_valid = 0;
191 u_int	moea64_pte_overflow = 0;
192 u_int	moea64_pvo_entries = 0;
193 u_int	moea64_pvo_enter_calls = 0;
194 u_int	moea64_pvo_remove_calls = 0;
195 SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
196     &moea64_pte_valid, 0, "");
197 SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
198     &moea64_pte_overflow, 0, "");
199 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
200     &moea64_pvo_entries, 0, "");
201 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
202     &moea64_pvo_enter_calls, 0, "");
203 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
204     &moea64_pvo_remove_calls, 0, "");
205 
206 vm_offset_t	moea64_scratchpage_va[2];
207 struct pvo_entry *moea64_scratchpage_pvo[2];
208 struct	mtx	moea64_scratchpage_mtx;
209 
210 uint64_t 	moea64_large_page_mask = 0;
211 uint64_t	moea64_large_page_size = 0;
212 int		moea64_large_page_shift = 0;
213 
214 /*
215  * PVO calls.
216  */
217 static int	moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo,
218 		    struct pvo_head *pvo_head);
219 static void	moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo);
220 static void	moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo);
221 static struct	pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);
222 
223 /*
224  * Utility routines.
225  */
226 static boolean_t	moea64_query_bit(mmu_t, vm_page_t, uint64_t);
227 static u_int		moea64_clear_bit(mmu_t, vm_page_t, uint64_t);
228 static void		moea64_kremove(mmu_t, vm_offset_t);
229 static void		moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
230 			    vm_paddr_t pa, vm_size_t sz);
231 static void		moea64_pmap_init_qpages(void);
232 
233 /*
234  * Kernel MMU interface
235  */
236 void moea64_clear_modify(mmu_t, vm_page_t);
237 void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
238 void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
239     vm_page_t *mb, vm_offset_t b_offset, int xfersize);
240 int moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
241     u_int flags, int8_t psind);
242 void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
243     vm_prot_t);
244 void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
245 vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
246 vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
247 void moea64_init(mmu_t);
248 boolean_t moea64_is_modified(mmu_t, vm_page_t);
249 boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
250 boolean_t moea64_is_referenced(mmu_t, vm_page_t);
251 int moea64_ts_referenced(mmu_t, vm_page_t);
252 vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
253 boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
254 void moea64_page_init(mmu_t, vm_page_t);
255 int moea64_page_wired_mappings(mmu_t, vm_page_t);
256 void moea64_pinit(mmu_t, pmap_t);
257 void moea64_pinit0(mmu_t, pmap_t);
258 void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
259 void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
260 void moea64_qremove(mmu_t, vm_offset_t, int);
261 void moea64_release(mmu_t, pmap_t);
262 void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
263 void moea64_remove_pages(mmu_t, pmap_t);
264 void moea64_remove_all(mmu_t, vm_page_t);
265 void moea64_remove_write(mmu_t, vm_page_t);
266 void moea64_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
267 void moea64_zero_page(mmu_t, vm_page_t);
268 void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
269 void moea64_activate(mmu_t, struct thread *);
270 void moea64_deactivate(mmu_t, struct thread *);
271 void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t);
272 void *moea64_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
273 void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
274 vm_paddr_t moea64_kextract(mmu_t, vm_offset_t);
275 void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
276 void moea64_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t ma);
277 void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t);
278 boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
279 static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
280 void moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz,
281     void **va);
282 void moea64_scan_init(mmu_t mmu);
283 vm_offset_t moea64_quick_enter_page(mmu_t mmu, vm_page_t m);
284 void moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr);
285 
286 static mmu_method_t moea64_methods[] = {
287 	MMUMETHOD(mmu_clear_modify,	moea64_clear_modify),
288 	MMUMETHOD(mmu_copy_page,	moea64_copy_page),
289 	MMUMETHOD(mmu_copy_pages,	moea64_copy_pages),
290 	MMUMETHOD(mmu_enter,		moea64_enter),
291 	MMUMETHOD(mmu_enter_object,	moea64_enter_object),
292 	MMUMETHOD(mmu_enter_quick,	moea64_enter_quick),
293 	MMUMETHOD(mmu_extract,		moea64_extract),
294 	MMUMETHOD(mmu_extract_and_hold,	moea64_extract_and_hold),
295 	MMUMETHOD(mmu_init,		moea64_init),
296 	MMUMETHOD(mmu_is_modified,	moea64_is_modified),
297 	MMUMETHOD(mmu_is_prefaultable,	moea64_is_prefaultable),
298 	MMUMETHOD(mmu_is_referenced,	moea64_is_referenced),
299 	MMUMETHOD(mmu_ts_referenced,	moea64_ts_referenced),
300 	MMUMETHOD(mmu_map,     		moea64_map),
301 	MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
302 	MMUMETHOD(mmu_page_init,	moea64_page_init),
303 	MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings),
304 	MMUMETHOD(mmu_pinit,		moea64_pinit),
305 	MMUMETHOD(mmu_pinit0,		moea64_pinit0),
306 	MMUMETHOD(mmu_protect,		moea64_protect),
307 	MMUMETHOD(mmu_qenter,		moea64_qenter),
308 	MMUMETHOD(mmu_qremove,		moea64_qremove),
309 	MMUMETHOD(mmu_release,		moea64_release),
310 	MMUMETHOD(mmu_remove,		moea64_remove),
311 	MMUMETHOD(mmu_remove_pages,	moea64_remove_pages),
312 	MMUMETHOD(mmu_remove_all,      	moea64_remove_all),
313 	MMUMETHOD(mmu_remove_write,	moea64_remove_write),
314 	MMUMETHOD(mmu_sync_icache,	moea64_sync_icache),
315 	MMUMETHOD(mmu_unwire,		moea64_unwire),
316 	MMUMETHOD(mmu_zero_page,       	moea64_zero_page),
317 	MMUMETHOD(mmu_zero_page_area,	moea64_zero_page_area),
318 	MMUMETHOD(mmu_activate,		moea64_activate),
319 	MMUMETHOD(mmu_deactivate,      	moea64_deactivate),
320 	MMUMETHOD(mmu_page_set_memattr,	moea64_page_set_memattr),
321 	MMUMETHOD(mmu_quick_enter_page, moea64_quick_enter_page),
322 	MMUMETHOD(mmu_quick_remove_page, moea64_quick_remove_page),
323 
324 	/* Internal interfaces */
325 	MMUMETHOD(mmu_mapdev,		moea64_mapdev),
326 	MMUMETHOD(mmu_mapdev_attr,	moea64_mapdev_attr),
327 	MMUMETHOD(mmu_unmapdev,		moea64_unmapdev),
328 	MMUMETHOD(mmu_kextract,		moea64_kextract),
329 	MMUMETHOD(mmu_kenter,		moea64_kenter),
330 	MMUMETHOD(mmu_kenter_attr,	moea64_kenter_attr),
331 	MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
332 	MMUMETHOD(mmu_scan_init,	moea64_scan_init),
333 	MMUMETHOD(mmu_dumpsys_map,	moea64_dumpsys_map),
334 
335 	{ 0, 0 }
336 };
337 
338 MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0);
339 
340 static struct pvo_head *
341 vm_page_to_pvoh(vm_page_t m)
342 {
343 
344 	mtx_assert(PV_LOCKPTR(VM_PAGE_TO_PHYS(m)), MA_OWNED);
345 	return (&m->md.mdpg_pvoh);
346 }
347 
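/*
 * Allocate a PVO entry.  Before the VM system is up (or when the caller
 * explicitly requests a bootstrap entry), entries come from the static
 * bootstrap pool and are tagged PVO_BOOTSTRAP; afterwards they come from
 * the PVO UMA zone.
 */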
348 static struct pvo_entry *
349 alloc_pvo_entry(int bootstrap)
350 {
351 	struct pvo_entry *pvo;
352 
353 	if (!moea64_initialized || bootstrap) {
354 		if (moea64_bpvo_pool_index >= moea64_bpvo_pool_size) {
355 			panic("alloc_pvo_entry: bpvo pool exhausted, %d, %d, %zu",
356 			      moea64_bpvo_pool_index, moea64_bpvo_pool_size,
357 			      moea64_bpvo_pool_size * sizeof(struct pvo_entry));
358 		}
359 		pvo = &moea64_bpvo_pool[
360 		    atomic_fetchadd_int(&moea64_bpvo_pool_index, 1)];
361 		bzero(pvo, sizeof(*pvo));
362 		pvo->pvo_vaddr = PVO_BOOTSTRAP;
363 	} else {
364 		pvo = uma_zalloc(moea64_pvo_zone, M_NOWAIT);
365 		bzero(pvo, sizeof(*pvo));
366 	}
367 
368 	return (pvo);
369 }
370 
371 
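/*
 * Bind an allocated PVO to a pmap and (page-aligned) virtual address:
 * record the VPN for that address and compute the PTEG slot from the
 * VSID/page-index hash.  The pmap lock must be held.
 */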
372 static void
373 init_pvo_entry(struct pvo_entry *pvo, pmap_t pmap, vm_offset_t va)
374 {
375 	uint64_t vsid;
376 	uint64_t hash;
377 	int shift;
378 
379 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
380 
381 	pvo->pvo_pmap = pmap;
382 	va &= ~ADDR_POFF;
383 	pvo->pvo_vaddr |= va;
384 	vsid = va_to_vsid(pmap, va);
385 	pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)
386 	    | (vsid << 16);
387 
388 	shift = (pvo->pvo_vaddr & PVO_LARGE) ? moea64_large_page_shift :
389 	    ADDR_PIDX_SHFT;
390 	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)va & ADDR_PIDX) >> shift);
391 	pvo->pvo_pte.slot = (hash & moea64_pteg_mask) << 3;
392 }
393 
394 static void
395 free_pvo_entry(struct pvo_entry *pvo)
396 {
397 
398 	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
399 		uma_zfree(moea64_pvo_zone, pvo);
400 }
401 
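/*
 * Construct the hardware PTE (struct lpte) described by a PVO entry,
 * translating its cached protection and PVO_* flags into LPTE_* bits.
 */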
402 void
403 moea64_pte_from_pvo(const struct pvo_entry *pvo, struct lpte *lpte)
404 {
405 
406 	lpte->pte_hi = (pvo->pvo_vpn >> (ADDR_API_SHFT64 - ADDR_PIDX_SHFT)) &
407 	    LPTE_AVPN_MASK;
408 	lpte->pte_hi |= LPTE_VALID;
409 
410 	if (pvo->pvo_vaddr & PVO_LARGE)
411 		lpte->pte_hi |= LPTE_BIG;
412 	if (pvo->pvo_vaddr & PVO_WIRED)
413 		lpte->pte_hi |= LPTE_WIRED;
414 	if (pvo->pvo_vaddr & PVO_HID)
415 		lpte->pte_hi |= LPTE_HID;
416 
417 	lpte->pte_lo = pvo->pvo_pte.pa; /* Includes WIMG bits */
418 	if (pvo->pvo_pte.prot & VM_PROT_WRITE)
419 		lpte->pte_lo |= LPTE_BW;
420 	else
421 		lpte->pte_lo |= LPTE_BR;
422 
423 	if (!(pvo->pvo_pte.prot & VM_PROT_EXECUTE))
424 		lpte->pte_lo |= LPTE_NOEXEC;
425 }
426 
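/*
 * Compute the WIMG storage-control bits for a physical address.  Explicit
 * memory attributes are honored; otherwise addresses inside a known
 * physical memory region are mapped coherent (M) and everything else is
 * treated as cache-inhibited and guarded (I | G).
 */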
427 static __inline uint64_t
428 moea64_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
429 {
430 	uint64_t pte_lo;
431 	int i;
432 
433 	if (ma != VM_MEMATTR_DEFAULT) {
434 		switch (ma) {
435 		case VM_MEMATTR_UNCACHEABLE:
436 			return (LPTE_I | LPTE_G);
437 		case VM_MEMATTR_CACHEABLE:
438 			return (LPTE_M);
439 		case VM_MEMATTR_WRITE_COMBINING:
440 		case VM_MEMATTR_WRITE_BACK:
441 		case VM_MEMATTR_PREFETCHABLE:
442 			return (LPTE_I);
443 		case VM_MEMATTR_WRITE_THROUGH:
444 			return (LPTE_W | LPTE_M);
445 		}
446 	}
447 
448 	/*
449 	 * Assume the page is cache inhibited and access is guarded unless
450 	 * it's in our available memory array.
451 	 */
452 	pte_lo = LPTE_I | LPTE_G;
453 	for (i = 0; i < pregions_sz; i++) {
454 		if ((pa >= pregions[i].mr_start) &&
455 		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
456 			pte_lo &= ~(LPTE_I | LPTE_G);
457 			pte_lo |= LPTE_M;
458 			break;
459 		}
460 	}
461 
462 	return pte_lo;
463 }
464 
465 /*
466  * Quick sort callout for comparing memory regions.
467  */
468 static int	om_cmp(const void *a, const void *b);
469 
470 static int
471 om_cmp(const void *a, const void *b)
472 {
473 	const struct	ofw_map *mapa;
474 	const struct	ofw_map *mapb;
475 
476 	mapa = a;
477 	mapb = b;
478 	if (mapa->om_pa < mapb->om_pa)
479 		return (-1);
480 	else if (mapa->om_pa > mapb->om_pa)
481 		return (1);
482 	else
483 		return (0);
484 }
485 
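/*
 * Parse the Open Firmware "translations" property and re-enter those
 * mappings into the kernel pmap, so that OF client interface calls keep
 * working once translation is enabled.  Direct-mapped ranges and addresses
 * that are already mapped are skipped.
 */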
486 static void
487 moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
488 {
489 	struct ofw_map	translations[sz/(4*sizeof(cell_t))]; /*>= 4 cells per */
490 	pcell_t		acells, trans_cells[sz/sizeof(cell_t)];
491 	struct pvo_entry *pvo;
492 	register_t	msr;
493 	vm_offset_t	off;
494 	vm_paddr_t	pa_base;
495 	int		i, j;
496 
497 	bzero(translations, sz);
498 	OF_getencprop(OF_finddevice("/"), "#address-cells", &acells,
499 	    sizeof(acells));
500 	if (OF_getencprop(mmu, "translations", trans_cells, sz) == -1)
501 		panic("moea64_bootstrap: can't get ofw translations");
502 
503 	CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations");
504 	sz /= sizeof(cell_t);
505 	for (i = 0, j = 0; i < sz; j++) {
506 		translations[j].om_va = trans_cells[i++];
507 		translations[j].om_len = trans_cells[i++];
508 		translations[j].om_pa = trans_cells[i++];
509 		if (acells == 2) {
510 			translations[j].om_pa <<= 32;
511 			translations[j].om_pa |= trans_cells[i++];
512 		}
513 		translations[j].om_mode = trans_cells[i++];
514 	}
515 	KASSERT(i == sz, ("Translations map has incorrect cell count (%d/%zd)",
516 	    i, sz));
517 
518 	sz = j;
519 	qsort(translations, sz, sizeof (*translations), om_cmp);
520 
521 	for (i = 0; i < sz; i++) {
522 		pa_base = translations[i].om_pa;
523 	      #ifndef __powerpc64__
524 		if ((translations[i].om_pa >> 32) != 0)
525 			panic("OFW translations above 32-bit boundary!");
526 	      #endif
527 
528 		if (pa_base % PAGE_SIZE)
529 			panic("OFW translation not page-aligned (phys)!");
530 		if (translations[i].om_va % PAGE_SIZE)
531 			panic("OFW translation not page-aligned (virt)!");
532 
533 		CTR3(KTR_PMAP, "translation: pa=%#zx va=%#x len=%#x",
534 		    pa_base, translations[i].om_va, translations[i].om_len);
535 
536 		/* Now enter the pages for this mapping */
537 
538 		DISABLE_TRANS(msr);
539 		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
540 			/* If this address is direct-mapped, skip remapping */
541 			if (hw_direct_map && translations[i].om_va == pa_base &&
542 			    moea64_calc_wimg(pa_base + off, VM_MEMATTR_DEFAULT) == LPTE_M)
543 				continue;
544 
545 			PMAP_LOCK(kernel_pmap);
546 			pvo = moea64_pvo_find_va(kernel_pmap,
547 			    translations[i].om_va + off);
548 			PMAP_UNLOCK(kernel_pmap);
549 			if (pvo != NULL)
550 				continue;
551 
552 			moea64_kenter(mmup, translations[i].om_va + off,
553 			    pa_base + off);
554 		}
555 		ENABLE_TRANS(msr);
556 	}
557 }
558 
559 #ifdef __powerpc64__
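/*
 * Detect and enable large-page (16 MB) support.  The 970-class CPUs ship
 * with large pages disabled in HID4, so they are switched on here; the
 * resulting page size and shift feed the direct map setup.
 */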
560 static void
561 moea64_probe_large_page(void)
562 {
563 	uint16_t pvr = mfpvr() >> 16;
564 
565 	switch (pvr) {
566 	case IBM970:
567 	case IBM970FX:
568 	case IBM970MP:
569 		powerpc_sync(); isync();
570 		mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG);
571 		powerpc_sync(); isync();
572 
573 		/* FALLTHROUGH */
574 	default:
575 		moea64_large_page_size = 0x1000000; /* 16 MB */
576 		moea64_large_page_shift = 24;
577 	}
578 
579 	moea64_large_page_mask = moea64_large_page_size - 1;
580 }
581 
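/*
 * Pre-load a kernel SLB entry covering the given address so early boot
 * code cannot take an SLB fault there.  If the per-CPU SLB cache already
 * holds a matching entry, nothing is done.
 */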
582 static void
583 moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
584 {
585 	struct slb *cache;
586 	struct slb entry;
587 	uint64_t esid, slbe;
588 	uint64_t i;
589 
590 	cache = PCPU_GET(slb);
591 	esid = va >> ADDR_SR_SHFT;
592 	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
593 
594 	for (i = 0; i < 64; i++) {
595 		if (cache[i].slbe == (slbe | i))
596 			return;
597 	}
598 
599 	entry.slbe = slbe;
600 	entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
601 	if (large)
602 		entry.slbv |= SLBV_L;
603 
604 	slb_insert_kernel(entry.slbe, entry.slbv);
605 }
606 #endif
607 
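/*
 * Create the kernel's initial physical mappings.  With large-page support
 * this builds a 1:1 direct map of all physical memory (guarding pages that
 * could prefetch past the end of a region); without it, only the bootstrap
 * PVO pool and the kernel image are mapped 1:1, page by page.
 */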
608 static void
609 moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
610     vm_offset_t kernelend)
611 {
612 	struct pvo_entry *pvo;
613 	register_t msr;
614 	vm_paddr_t pa;
615 	vm_offset_t size, off;
616 	uint64_t pte_lo;
617 	int i;
618 
619 	if (moea64_large_page_size == 0)
620 		hw_direct_map = 0;
621 
622 	DISABLE_TRANS(msr);
623 	if (hw_direct_map) {
624 		PMAP_LOCK(kernel_pmap);
625 		for (i = 0; i < pregions_sz; i++) {
626 		  for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
627 		     pregions[i].mr_size; pa += moea64_large_page_size) {
628 			pte_lo = LPTE_M;
629 
630 			pvo = alloc_pvo_entry(1 /* bootstrap */);
631 			pvo->pvo_vaddr |= PVO_WIRED | PVO_LARGE;
632 			init_pvo_entry(pvo, kernel_pmap, pa);
633 
634 			/*
635 			 * Set memory access as guarded if prefetch within
636 			 * the page could exit the available physmem area.
637 			 */
638 			if (pa & moea64_large_page_mask) {
639 				pa &= moea64_large_page_mask;
640 				pte_lo |= LPTE_G;
641 			}
642 			if (pa + moea64_large_page_size >
643 			    pregions[i].mr_start + pregions[i].mr_size)
644 				pte_lo |= LPTE_G;
645 
646 			pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE |
647 			    VM_PROT_EXECUTE;
648 			pvo->pvo_pte.pa = pa | pte_lo;
649 			moea64_pvo_enter(mmup, pvo, NULL);
650 		  }
651 		}
652 		PMAP_UNLOCK(kernel_pmap);
653 	} else {
654 		size = moea64_bpvo_pool_size*sizeof(struct pvo_entry);
655 		off = (vm_offset_t)(moea64_bpvo_pool);
656 		for (pa = off; pa < off + size; pa += PAGE_SIZE)
657 			moea64_kenter(mmup, pa, pa);
658 
659 		/*
660 		 * Map certain important things, like ourselves.
661 		 *
662 		 * NOTE: We do not map the exception vector space. That code is
663 		 * used only in real mode, and leaving it unmapped allows us to
664 		 * catch NULL pointer dereferences, instead of making NULL a valid
665 		 * address.
666 		 */
667 
668 		for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
669 		    pa += PAGE_SIZE)
670 			moea64_kenter(mmup, pa, pa);
671 	}
672 	ENABLE_TRANS(msr);
673 
674 	/*
675 	 * Allow user to override unmapped_buf_allowed for testing.
676 	 * XXXKIB Only direct map implementation was tested.
677 	 */
678 	if (!TUNABLE_INT_FETCH("vfs.unmapped_buf_allowed",
679 	    &unmapped_buf_allowed))
680 		unmapped_buf_allowed = hw_direct_map;
681 }
682 
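/*
 * First stage of bootstrap: decide whether a direct map is available, read
 * the physical memory map from the firmware, populate phys_avail[] while
 * carving out the kernel image and exception vectors, and size the page
 * table (PTEG count).
 */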
683 void
684 moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
685 {
686 	int		i, j;
687 	vm_size_t	physsz, hwphyssz;
688 
689 #ifndef __powerpc64__
690 	/* We don't have a direct map since there is no BAT */
691 	hw_direct_map = 0;
692 
693 	/* Make sure battable is zero, since we have no BAT */
694 	for (i = 0; i < 16; i++) {
695 		battable[i].batu = 0;
696 		battable[i].batl = 0;
697 	}
698 #else
699 	moea64_probe_large_page();
700 
701 	/* Use a direct map if we have large page support */
702 	if (moea64_large_page_size > 0)
703 		hw_direct_map = 1;
704 	else
705 		hw_direct_map = 0;
706 #endif
707 
708 	/* Get physical memory regions from firmware */
709 	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
710 	CTR0(KTR_PMAP, "moea64_bootstrap: physical memory");
711 
712 	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
713 		panic("moea64_bootstrap: phys_avail too small");
714 
715 	phys_avail_count = 0;
716 	physsz = 0;
717 	hwphyssz = 0;
718 	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
719 	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
720 		CTR3(KTR_PMAP, "region: %#zx - %#zx (%#zx)",
721 		    regions[i].mr_start, regions[i].mr_start +
722 		    regions[i].mr_size, regions[i].mr_size);
723 		if (hwphyssz != 0 &&
724 		    (physsz + regions[i].mr_size) >= hwphyssz) {
725 			if (physsz < hwphyssz) {
726 				phys_avail[j] = regions[i].mr_start;
727 				phys_avail[j + 1] = regions[i].mr_start +
728 				    hwphyssz - physsz;
729 				physsz = hwphyssz;
730 				phys_avail_count++;
731 			}
732 			break;
733 		}
734 		phys_avail[j] = regions[i].mr_start;
735 		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
736 		phys_avail_count++;
737 		physsz += regions[i].mr_size;
738 	}
739 
740 	/* Check for overlap with the kernel and exception vectors */
741 	for (j = 0; j < 2*phys_avail_count; j+=2) {
742 		if (phys_avail[j] < EXC_LAST)
743 			phys_avail[j] += EXC_LAST;
744 
745 		if (kernelstart >= phys_avail[j] &&
746 		    kernelstart < phys_avail[j+1]) {
747 			if (kernelend < phys_avail[j+1]) {
748 				phys_avail[2*phys_avail_count] =
749 				    (kernelend & ~PAGE_MASK) + PAGE_SIZE;
750 				phys_avail[2*phys_avail_count + 1] =
751 				    phys_avail[j+1];
752 				phys_avail_count++;
753 			}
754 
755 			phys_avail[j+1] = kernelstart & ~PAGE_MASK;
756 		}
757 
758 		if (kernelend >= phys_avail[j] &&
759 		    kernelend < phys_avail[j+1]) {
760 			if (kernelstart > phys_avail[j]) {
761 				phys_avail[2*phys_avail_count] = phys_avail[j];
762 				phys_avail[2*phys_avail_count + 1] =
763 				    kernelstart & ~PAGE_MASK;
764 				phys_avail_count++;
765 			}
766 
767 			phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE;
768 		}
769 	}
770 
771 	physmem = btoc(physsz);
772 
773 #ifdef PTEGCOUNT
774 	moea64_pteg_count = PTEGCOUNT;
775 #else
776 	moea64_pteg_count = 0x1000;
777 
778 	while (moea64_pteg_count < physmem)
779 		moea64_pteg_count <<= 1;
780 
781 	moea64_pteg_count >>= 1;
782 #endif /* PTEGCOUNT */
783 }
784 
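/*
 * Second stage of bootstrap: initialize the PV and SLB locks, the bootstrap
 * PVO pool, the kernel VSIDs/segment registers and the kernel pmap itself,
 * then create the early mappings via moea64_setup_direct_map().
 */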
785 void
786 moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
787 {
788 	int		i;
789 
790 	/*
791 	 * Set PTEG mask
792 	 */
793 	moea64_pteg_mask = moea64_pteg_count - 1;
794 
795 	/*
796 	 * Initialize SLB table lock and page locks
797 	 */
798 	mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);
799 	for (i = 0; i < PV_LOCK_COUNT; i++)
800 		mtx_init(&pv_lock[i], "page pv", NULL, MTX_DEF);
801 
802 	/*
803 	 * Initialise the bootstrap pvo pool.
804 	 */
805 	moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
806 		moea64_bpvo_pool_size*sizeof(struct pvo_entry), 0);
807 	moea64_bpvo_pool_index = 0;
808 
809 	/*
810 	 * Make sure kernel vsid is allocated as well as VSID 0.
811 	 */
812 	#ifndef __powerpc64__
813 	moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
814 		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
815 	moea64_vsid_bitmap[0] |= 1;
816 	#endif
817 
818 	/*
819 	 * Initialize the kernel pmap (which is statically allocated).
820 	 */
821 	#ifdef __powerpc64__
822 	for (i = 0; i < 64; i++) {
823 		pcpup->pc_slb[i].slbv = 0;
824 		pcpup->pc_slb[i].slbe = 0;
825 	}
826 	#else
827 	for (i = 0; i < 16; i++)
828 		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
829 	#endif
830 
831 	kernel_pmap->pmap_phys = kernel_pmap;
832 	CPU_FILL(&kernel_pmap->pm_active);
833 	RB_INIT(&kernel_pmap->pmap_pvo);
834 
835 	PMAP_LOCK_INIT(kernel_pmap);
836 
837 	/*
838 	 * Now map in all the other buffers we allocated earlier
839 	 */
840 
841 	moea64_setup_direct_map(mmup, kernelstart, kernelend);
842 }
843 
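/*
 * Final stage of bootstrap: enter the Open Firmware translations, turn on
 * address translation, establish the kernel virtual address range, and
 * allocate thread0's stack, the message buffer, the dynamic per-CPU area
 * and (without a direct map) the page-zeroing scratch mappings.
 */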
844 void
845 moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
846 {
847 	ihandle_t	mmui;
848 	phandle_t	chosen;
849 	phandle_t	mmu;
850 	ssize_t		sz;
851 	int		i;
852 	vm_offset_t	pa, va;
853 	void		*dpcpu;
854 
855 	/*
856 	 * Set up the Open Firmware pmap and add its mappings if not in real
857 	 * mode.
858 	 */
859 
860 	chosen = OF_finddevice("/chosen");
861 	if (chosen != -1 && OF_getencprop(chosen, "mmu", &mmui, 4) != -1) {
862 		mmu = OF_instance_to_package(mmui);
863 		if (mmu == -1 ||
864 		    (sz = OF_getproplen(mmu, "translations")) == -1)
865 			sz = 0;
866 		if (sz > 6144 /* tmpstksz - 2 KB headroom */)
867 			panic("moea64_bootstrap: too many ofw translations");
868 
869 		if (sz > 0)
870 			moea64_add_ofw_mappings(mmup, mmu, sz);
871 	}
872 
873 	/*
874 	 * Calculate the last available physical address.
875 	 */
876 	for (i = 0; phys_avail[i + 2] != 0; i += 2)
877 		;
878 	Maxmem = powerpc_btop(phys_avail[i + 1]);
879 
880 	/*
881 	 * Initialize MMU and remap early physical mappings
882 	 */
883 	MMU_CPU_BOOTSTRAP(mmup,0);
884 	mtmsr(mfmsr() | PSL_DR | PSL_IR);
885 	pmap_bootstrapped++;
886 	bs_remap_earlyboot();
887 
888 	/*
889 	 * Set the start and end of kva.
890 	 */
891 	virtual_avail = VM_MIN_KERNEL_ADDRESS;
892 	virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;
893 
894 	/*
895 	 * Map the entire KVA range into the SLB. We must not fault there.
896 	 */
897 	#ifdef __powerpc64__
898 	for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH)
899 		moea64_bootstrap_slb_prefault(va, 0);
900 	#endif
901 
902 	/*
903 	 * Figure out how far we can extend virtual_end into segment 16
904 	 * without running into existing mappings. Segment 16 is guaranteed
905 	 * to contain neither RAM nor devices (at least on Apple hardware),
906 	 * but will generally contain some OFW mappings we should not
907 	 * step on.
908 	 */
909 
910 	#ifndef __powerpc64__	/* KVA is in high memory on PPC64 */
911 	PMAP_LOCK(kernel_pmap);
912 	while (virtual_end < VM_MAX_KERNEL_ADDRESS &&
913 	    moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL)
914 		virtual_end += PAGE_SIZE;
915 	PMAP_UNLOCK(kernel_pmap);
916 	#endif
917 
918 	/*
919 	 * Allocate a kernel stack with a guard page for thread0 and map it
920 	 * into the kernel page map.
921 	 */
922 	pa = moea64_bootstrap_alloc(kstack_pages * PAGE_SIZE, PAGE_SIZE);
923 	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
924 	virtual_avail = va + kstack_pages * PAGE_SIZE;
925 	CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
926 	thread0.td_kstack = va;
927 	thread0.td_kstack_pages = kstack_pages;
928 	for (i = 0; i < kstack_pages; i++) {
929 		moea64_kenter(mmup, va, pa);
930 		pa += PAGE_SIZE;
931 		va += PAGE_SIZE;
932 	}
933 
934 	/*
935 	 * Allocate virtual address space for the message buffer.
936 	 */
937 	pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE);
938 	msgbufp = (struct msgbuf *)virtual_avail;
939 	va = virtual_avail;
940 	virtual_avail += round_page(msgbufsize);
941 	while (va < virtual_avail) {
942 		moea64_kenter(mmup, va, pa);
943 		pa += PAGE_SIZE;
944 		va += PAGE_SIZE;
945 	}
946 
947 	/*
948 	 * Allocate virtual address space for the dynamic percpu area.
949 	 */
950 	pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
951 	dpcpu = (void *)virtual_avail;
952 	va = virtual_avail;
953 	virtual_avail += DPCPU_SIZE;
954 	while (va < virtual_avail) {
955 		moea64_kenter(mmup, va, pa);
956 		pa += PAGE_SIZE;
957 		va += PAGE_SIZE;
958 	}
959 	dpcpu_init(dpcpu, 0);
960 
961 	/*
962 	 * Allocate some things for page zeroing. We put this directly
963 	 * in the page table and use MOEA64_PTE_REPLACE to keep the PVO
964 	 * book-keeping and other parts of the VM system from even knowing
965 	 * that this hack exists.
966 	 */
967 
968 	if (!hw_direct_map) {
969 		mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL,
970 		    MTX_DEF);
971 		for (i = 0; i < 2; i++) {
972 			moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
973 			virtual_end -= PAGE_SIZE;
974 
975 			moea64_kenter(mmup, moea64_scratchpage_va[i], 0);
976 
977 			PMAP_LOCK(kernel_pmap);
978 			moea64_scratchpage_pvo[i] = moea64_pvo_find_va(
979 			    kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]);
980 			PMAP_UNLOCK(kernel_pmap);
981 		}
982 	}
983 }
984 
985 static void
986 moea64_pmap_init_qpages(void)
987 {
988 	struct pcpu *pc;
989 	int i;
990 
991 	if (hw_direct_map)
992 		return;
993 
994 	CPU_FOREACH(i) {
995 		pc = pcpu_find(i);
996 		pc->pc_qmap_addr = kva_alloc(PAGE_SIZE);
997 		if (pc->pc_qmap_addr == 0)
998 			panic("pmap_init_qpages: unable to allocate KVA");
999 		PMAP_LOCK(kernel_pmap);
1000 		pc->pc_qmap_pvo = moea64_pvo_find_va(kernel_pmap, pc->pc_qmap_addr);
1001 		PMAP_UNLOCK(kernel_pmap);
1002 		mtx_init(&pc->pc_qmap_lock, "qmap lock", NULL, MTX_DEF);
1003 	}
1004 }
1005 
1006 SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, moea64_pmap_init_qpages, NULL);
1007 
1008 /*
1009  * Activate a user pmap.  This mostly involves setting some non-CPU
1010  * state.
1011  */
1012 void
1013 moea64_activate(mmu_t mmu, struct thread *td)
1014 {
1015 	pmap_t	pm;
1016 
1017 	pm = &td->td_proc->p_vmspace->vm_pmap;
1018 	CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
1019 
1020 	#ifdef __powerpc64__
1021 	PCPU_SET(userslb, pm->pm_slb);
1022 	__asm __volatile("slbmte %0, %1; isync" ::
1023 	    "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
1024 	#else
1025 	PCPU_SET(curpmap, pm->pmap_phys);
1026 	mtsrin(USER_SR << ADDR_SR_SHFT, td->td_pcb->pcb_cpu.aim.usr_vsid);
1027 	#endif
1028 }
1029 
1030 void
1031 moea64_deactivate(mmu_t mmu, struct thread *td)
1032 {
1033 	pmap_t	pm;
1034 
1035 	__asm __volatile("isync; slbie %0" :: "r"(USER_ADDR));
1036 
1037 	pm = &td->td_proc->p_vmspace->vm_pmap;
1038 	CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
1039 	#ifdef __powerpc64__
1040 	PCPU_SET(userslb, NULL);
1041 	#else
1042 	PCPU_SET(curpmap, NULL);
1043 	#endif
1044 }
1045 
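/*
 * Clear the wired flag from every mapping in the given range, pushing the
 * change into the page table and transferring any referenced/changed bits
 * to the vm_page for managed, writable mappings.
 */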
1046 void
1047 moea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
1048 {
1049 	struct	pvo_entry key, *pvo;
1050 	vm_page_t m;
1051 	int64_t	refchg;
1052 
1053 	key.pvo_vaddr = sva;
1054 	PMAP_LOCK(pm);
1055 	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
1056 	    pvo != NULL && PVO_VADDR(pvo) < eva;
1057 	    pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
1058 		if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
1059 			panic("moea64_unwire: pvo %p is missing PVO_WIRED",
1060 			    pvo);
1061 		pvo->pvo_vaddr &= ~PVO_WIRED;
1062 		refchg = MOEA64_PTE_REPLACE(mmu, pvo, 0 /* No invalidation */);
1063 		if ((pvo->pvo_vaddr & PVO_MANAGED) &&
1064 		    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
1065 			if (refchg < 0)
1066 				refchg = LPTE_CHG;
1067 			m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
1068 
1069 			refchg |= atomic_readandclear_32(&m->md.mdpg_attrs);
1070 			if (refchg & LPTE_CHG)
1071 				vm_page_dirty(m);
1072 			if (refchg & LPTE_REF)
1073 				vm_page_aflag_set(m, PGA_REFERENCED);
1074 		}
1075 		pm->pm_stats.wired_count--;
1076 	}
1077 	PMAP_UNLOCK(pm);
1078 }
1079 
1080 /*
1081  * This goes through and sets the physical address of our
1082  * special scratch PTE to the PA we want to zero or copy. Because
1083  * of locking issues (this can get called in pvo_enter() by
1084  * the UMA allocator), we can't use most other utility functions here
1085  */
1086 
1087 static __inline
1088 void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_paddr_t pa) {
1089 
1090 	KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
1091 	mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);
1092 
1093 	moea64_scratchpage_pvo[which]->pvo_pte.pa =
1094 	    moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
1095 	MOEA64_PTE_REPLACE(mmup, moea64_scratchpage_pvo[which],
1096 	    MOEA64_PTE_INVALIDATE);
1097 	isync();
1098 }
1099 
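/*
 * Copy a page of physical memory, either through the direct map or by
 * temporarily mapping source and destination with the two scratch pages.
 */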
1100 void
1101 moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
1102 {
1103 	vm_offset_t	dst;
1104 	vm_offset_t	src;
1105 
1106 	dst = VM_PAGE_TO_PHYS(mdst);
1107 	src = VM_PAGE_TO_PHYS(msrc);
1108 
1109 	if (hw_direct_map) {
1110 		bcopy((void *)src, (void *)dst, PAGE_SIZE);
1111 	} else {
1112 		mtx_lock(&moea64_scratchpage_mtx);
1113 
1114 		moea64_set_scratchpage_pa(mmu, 0, src);
1115 		moea64_set_scratchpage_pa(mmu, 1, dst);
1116 
1117 		bcopy((void *)moea64_scratchpage_va[0],
1118 		    (void *)moea64_scratchpage_va[1], PAGE_SIZE);
1119 
1120 		mtx_unlock(&moea64_scratchpage_mtx);
1121 	}
1122 }
1123 
1124 static inline void
1125 moea64_copy_pages_dmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
1126     vm_page_t *mb, vm_offset_t b_offset, int xfersize)
1127 {
1128 	void *a_cp, *b_cp;
1129 	vm_offset_t a_pg_offset, b_pg_offset;
1130 	int cnt;
1131 
1132 	while (xfersize > 0) {
1133 		a_pg_offset = a_offset & PAGE_MASK;
1134 		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
1135 		a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) +
1136 		    a_pg_offset;
1137 		b_pg_offset = b_offset & PAGE_MASK;
1138 		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
1139 		b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) +
1140 		    b_pg_offset;
1141 		bcopy(a_cp, b_cp, cnt);
1142 		a_offset += cnt;
1143 		b_offset += cnt;
1144 		xfersize -= cnt;
1145 	}
1146 }
1147 
1148 static inline void
1149 moea64_copy_pages_nodmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
1150     vm_page_t *mb, vm_offset_t b_offset, int xfersize)
1151 {
1152 	void *a_cp, *b_cp;
1153 	vm_offset_t a_pg_offset, b_pg_offset;
1154 	int cnt;
1155 
1156 	mtx_lock(&moea64_scratchpage_mtx);
1157 	while (xfersize > 0) {
1158 		a_pg_offset = a_offset & PAGE_MASK;
1159 		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
1160 		moea64_set_scratchpage_pa(mmu, 0,
1161 		    VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
1162 		a_cp = (char *)moea64_scratchpage_va[0] + a_pg_offset;
1163 		b_pg_offset = b_offset & PAGE_MASK;
1164 		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
1165 		moea64_set_scratchpage_pa(mmu, 1,
1166 		    VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
1167 		b_cp = (char *)moea64_scratchpage_va[1] + b_pg_offset;
1168 		bcopy(a_cp, b_cp, cnt);
1169 		a_offset += cnt;
1170 		b_offset += cnt;
1171 		xfersize -= cnt;
1172 	}
1173 	mtx_unlock(&moea64_scratchpage_mtx);
1174 }
1175 
1176 void
1177 moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
1178     vm_page_t *mb, vm_offset_t b_offset, int xfersize)
1179 {
1180 
1181 	if (hw_direct_map) {
1182 		moea64_copy_pages_dmap(mmu, ma, a_offset, mb, b_offset,
1183 		    xfersize);
1184 	} else {
1185 		moea64_copy_pages_nodmap(mmu, ma, a_offset, mb, b_offset,
1186 		    xfersize);
1187 	}
1188 }
1189 
1190 void
1191 moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
1192 {
1193 	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
1194 
1195 	if (size + off > PAGE_SIZE)
1196 		panic("moea64_zero_page_area: size + off > PAGE_SIZE");
1197 
1198 	if (hw_direct_map) {
1199 		bzero((caddr_t)pa + off, size);
1200 	} else {
1201 		mtx_lock(&moea64_scratchpage_mtx);
1202 		moea64_set_scratchpage_pa(mmu, 0, pa);
1203 		bzero((caddr_t)moea64_scratchpage_va[0] + off, size);
1204 		mtx_unlock(&moea64_scratchpage_mtx);
1205 	}
1206 }
1207 
1208 /*
1209  * Zero a page of physical memory by temporarily mapping it
1210  */
1211 void
1212 moea64_zero_page(mmu_t mmu, vm_page_t m)
1213 {
1214 	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
1215 	vm_offset_t va, off;
1216 
1217 	if (!hw_direct_map) {
1218 		mtx_lock(&moea64_scratchpage_mtx);
1219 
1220 		moea64_set_scratchpage_pa(mmu, 0, pa);
1221 		va = moea64_scratchpage_va[0];
1222 	} else {
1223 		va = pa;
1224 	}
1225 
1226 	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
1227 		__asm __volatile("dcbz 0,%0" :: "r"(va + off));
1228 
1229 	if (!hw_direct_map)
1230 		mtx_unlock(&moea64_scratchpage_mtx);
1231 }
1232 
1233 vm_offset_t
1234 moea64_quick_enter_page(mmu_t mmu, vm_page_t m)
1235 {
1236 	struct pvo_entry *pvo;
1237 	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
1238 
1239 	if (hw_direct_map)
1240 		return (pa);
1241 
1242 	/*
1243  	 * MOEA64_PTE_REPLACE does some locking, so we can't just grab
1244 	 * a critical section and access the PCPU data like on i386.
1245 	 * Instead, pin the thread and grab the PCPU lock to prevent
1246 	 * a preempting thread from using the same PCPU data.
1247 	 */
1248 	sched_pin();
1249 
1250 	mtx_assert(PCPU_PTR(qmap_lock), MA_NOTOWNED);
1251 	pvo = PCPU_GET(qmap_pvo);
1252 
1253 	mtx_lock(PCPU_PTR(qmap_lock));
1254 	pvo->pvo_pte.pa = moea64_calc_wimg(pa, pmap_page_get_memattr(m)) |
1255 	    (uint64_t)pa;
1256 	MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_INVALIDATE);
1257 	isync();
1258 
1259 	return (PCPU_GET(qmap_addr));
1260 }
1261 
1262 void
1263 moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr)
1264 {
1265 	if (hw_direct_map)
1266 		return;
1267 
1268 	mtx_assert(PCPU_PTR(qmap_lock), MA_OWNED);
1269 	KASSERT(PCPU_GET(qmap_addr) == addr,
1270 	    ("moea64_quick_remove_page: invalid address"));
1271 	mtx_unlock(PCPU_PTR(qmap_lock));
1272 	sched_unpin();
1273 }
1274 
1275 /*
1276  * Map the given physical page at the specified virtual address in the
1277  * target pmap with the protection requested.  If specified the page
1278  * will be wired down.
1279  */
1280 
1281 int
1282 moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1283     vm_prot_t prot, u_int flags, int8_t psind)
1284 {
1285 	struct		pvo_entry *pvo, *oldpvo;
1286 	struct		pvo_head *pvo_head;
1287 	uint64_t	pte_lo;
1288 	int		error;
1289 
1290 	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
1291 		VM_OBJECT_ASSERT_LOCKED(m->object);
1292 
1293 	pvo = alloc_pvo_entry(0);
1294 	pvo->pvo_pmap = NULL; /* to be filled in later */
1295 	pvo->pvo_pte.prot = prot;
1296 
1297 	pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));
1298 	pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | pte_lo;
1299 
1300 	if ((flags & PMAP_ENTER_WIRED) != 0)
1301 		pvo->pvo_vaddr |= PVO_WIRED;
1302 
1303 	if ((m->oflags & VPO_UNMANAGED) != 0 || !moea64_initialized) {
1304 		pvo_head = NULL;
1305 	} else {
1306 		pvo_head = &m->md.mdpg_pvoh;
1307 		pvo->pvo_vaddr |= PVO_MANAGED;
1308 	}
1309 
1310 	for (;;) {
1311 		PV_PAGE_LOCK(m);
1312 		PMAP_LOCK(pmap);
1313 		if (pvo->pvo_pmap == NULL)
1314 			init_pvo_entry(pvo, pmap, va);
1315 		if (prot & VM_PROT_WRITE)
1316 			if (pmap_bootstrapped &&
1317 			    (m->oflags & VPO_UNMANAGED) == 0)
1318 				vm_page_aflag_set(m, PGA_WRITEABLE);
1319 
1320 		oldpvo = moea64_pvo_find_va(pmap, va);
1321 		if (oldpvo != NULL) {
1322 			if (oldpvo->pvo_vaddr == pvo->pvo_vaddr &&
1323 			    oldpvo->pvo_pte.pa == pvo->pvo_pte.pa &&
1324 			    oldpvo->pvo_pte.prot == prot) {
1325 				/* Identical mapping already exists */
1326 				error = 0;
1327 
1328 				/* If not in page table, reinsert it */
1329 				if (MOEA64_PTE_SYNCH(mmu, oldpvo) < 0) {
1330 					moea64_pte_overflow--;
1331 					MOEA64_PTE_INSERT(mmu, oldpvo);
1332 				}
1333 
1334 				/* Then just clean up and go home */
1335 				PV_PAGE_UNLOCK(m);
1336 				PMAP_UNLOCK(pmap);
1337 				free_pvo_entry(pvo);
1338 				break;
1339 			}
1340 
1341 			/* Otherwise, need to kill it first */
1342 			KASSERT(oldpvo->pvo_pmap == pmap, ("pmap of old "
1343 			    "mapping does not match new mapping"));
1344 			moea64_pvo_remove_from_pmap(mmu, oldpvo);
1345 		}
1346 		error = moea64_pvo_enter(mmu, pvo, pvo_head);
1347 		PV_PAGE_UNLOCK(m);
1348 		PMAP_UNLOCK(pmap);
1349 
1350 		/* Free any dead pages */
1351 		if (oldpvo != NULL) {
1352 			PV_LOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
1353 			moea64_pvo_remove_from_page(mmu, oldpvo);
1354 			PV_UNLOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
1355 			free_pvo_entry(oldpvo);
1356 		}
1357 
1358 		if (error != ENOMEM)
1359 			break;
1360 		if ((flags & PMAP_ENTER_NOSLEEP) != 0)
1361 			return (KERN_RESOURCE_SHORTAGE);
1362 		VM_OBJECT_ASSERT_UNLOCKED(m->object);
1363 		VM_WAIT;
1364 	}
1365 
1366 	/*
1367 	 * Flush the page from the instruction cache if this page is
1368 	 * mapped executable and cacheable.
1369 	 */
1370 	if (pmap != kernel_pmap && !(m->aflags & PGA_EXECUTABLE) &&
1371 	    (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
1372 		vm_page_aflag_set(m, PGA_EXECUTABLE);
1373 		moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
1374 	}
1375 	return (KERN_SUCCESS);
1376 }
1377 
1378 static void
1379 moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1380     vm_size_t sz)
1381 {
1382 
1383 	/*
1384 	 * This is much trickier than on older systems because
1385 	 * we can't sync the icache on physical addresses directly
1386 	 * without a direct map. Instead we check a couple of cases
1387 	 * where the memory is already mapped in and, failing that,
1388 	 * use the same trick we use for page zeroing to create
1389 	 * a temporary mapping for this physical address.
1390 	 */
1391 
1392 	if (!pmap_bootstrapped) {
1393 		/*
1394 		 * If PMAP is not bootstrapped, we are likely to be
1395 		 * in real mode.
1396 		 */
1397 		__syncicache((void *)pa, sz);
1398 	} else if (pmap == kernel_pmap) {
1399 		__syncicache((void *)va, sz);
1400 	} else if (hw_direct_map) {
1401 		__syncicache((void *)pa, sz);
1402 	} else {
1403 		/* Use the scratch page to set up a temp mapping */
1404 
1405 		mtx_lock(&moea64_scratchpage_mtx);
1406 
1407 		moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF);
1408 		__syncicache((void *)(moea64_scratchpage_va[1] +
1409 		    (va & ADDR_POFF)), sz);
1410 
1411 		mtx_unlock(&moea64_scratchpage_mtx);
1412 	}
1413 }
1414 
1415 /*
1416  * Maps a sequence of resident pages belonging to the same object.
1417  * The sequence begins with the given page m_start.  This page is
1418  * mapped at the given virtual address start.  Each subsequent page is
1419  * mapped at a virtual address that is offset from start by the same
1420  * amount as the page is offset from m_start within the object.  The
1421  * last page in the sequence is the page with the largest offset from
1422  * m_start that can be mapped at a virtual address less than the given
1423  * virtual address end.  Not every virtual page between start and end
1424  * is mapped; only those for which a resident page exists with the
1425  * corresponding offset from m_start are mapped.
1426  */
1427 void
1428 moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
1429     vm_page_t m_start, vm_prot_t prot)
1430 {
1431 	vm_page_t m;
1432 	vm_pindex_t diff, psize;
1433 
1434 	VM_OBJECT_ASSERT_LOCKED(m_start->object);
1435 
1436 	psize = atop(end - start);
1437 	m = m_start;
1438 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1439 		moea64_enter(mmu, pm, start + ptoa(diff), m, prot &
1440 		    (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP, 0);
1441 		m = TAILQ_NEXT(m, listq);
1442 	}
1443 }
1444 
1445 void
1446 moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
1447     vm_prot_t prot)
1448 {
1449 
1450 	moea64_enter(mmu, pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
1451 	    PMAP_ENTER_NOSLEEP, 0);
1452 }
1453 
1454 vm_paddr_t
1455 moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
1456 {
1457 	struct	pvo_entry *pvo;
1458 	vm_paddr_t pa;
1459 
1460 	PMAP_LOCK(pm);
1461 	pvo = moea64_pvo_find_va(pm, va);
1462 	if (pvo == NULL)
1463 		pa = 0;
1464 	else
1465 		pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va - PVO_VADDR(pvo));
1466 	PMAP_UNLOCK(pm);
1467 
1468 	return (pa);
1469 }
1470 
1471 /*
1472  * Atomically extract and hold the physical page with the given
1473  * pmap and virtual address pair if that mapping permits the given
1474  * protection.
1475  */
1476 vm_page_t
1477 moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1478 {
1479 	struct	pvo_entry *pvo;
1480 	vm_page_t m;
1481         vm_paddr_t pa;
1482 
1483 	m = NULL;
1484 	pa = 0;
1485 	PMAP_LOCK(pmap);
1486 retry:
1487 	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1488 	if (pvo != NULL && (pvo->pvo_pte.prot & prot) == prot) {
1489 		if (vm_page_pa_tryrelock(pmap,
1490 		    pvo->pvo_pte.pa & LPTE_RPGN, &pa))
1491 			goto retry;
1492 		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
1493 		vm_page_hold(m);
1494 	}
1495 	PA_UNLOCK_COND(pa);
1496 	PMAP_UNLOCK(pmap);
1497 	return (m);
1498 }
1499 
1500 static mmu_t installed_mmu;
1501 
1502 static void *
1503 moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags,
1504     int wait)
1505 {
1506 	struct pvo_entry *pvo;
1507         vm_offset_t va;
1508         vm_page_t m;
1509         int pflags, needed_lock;
1510 
1511 	/*
1512 	 * This entire routine is a horrible hack to avoid bothering kmem
1513 	 * for new KVA addresses. Because this can get called from inside
1514 	 * kmem allocation routines, calling kmem for a new address here
1515 	 * can lead to locking non-recursive mutexes multiple times.
1516 	 */
1517 
1518 	*flags = UMA_SLAB_PRIV;
1519 	needed_lock = !PMAP_LOCKED(kernel_pmap);
1520 	pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;
1521 
1522         for (;;) {
1523                 m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
1524                 if (m == NULL) {
1525                         if (wait & M_NOWAIT)
1526                                 return (NULL);
1527                         VM_WAIT;
1528                 } else
1529                         break;
1530         }
1531 
1532 	va = VM_PAGE_TO_PHYS(m);
1533 
1534 	pvo = alloc_pvo_entry(1 /* bootstrap */);
1535 
1536 	pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE;
1537 	pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | LPTE_M;
1538 
1539 	if (needed_lock)
1540 		PMAP_LOCK(kernel_pmap);
1541 
1542 	init_pvo_entry(pvo, kernel_pmap, va);
1543 	pvo->pvo_vaddr |= PVO_WIRED;
1544 
1545 	moea64_pvo_enter(installed_mmu, pvo, NULL);
1546 
1547 	if (needed_lock)
1548 		PMAP_UNLOCK(kernel_pmap);
1549 
1550 	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
1551                 bzero((void *)va, PAGE_SIZE);
1552 
1553 	return (void *)va;
1554 }
1555 
1556 extern int elf32_nxstack;
1557 
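/*
 * Late pmap initialization: create the UMA zone used for PVO entries,
 * installing a custom page allocator when there is no direct map, and mark
 * the module initialized so alloc_pvo_entry() stops using the bootstrap pool.
 */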
1558 void
1559 moea64_init(mmu_t mmu)
1560 {
1561 
1562 	CTR0(KTR_PMAP, "moea64_init");
1563 
1564 	moea64_pvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
1565 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1566 	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
1567 
1568 	if (!hw_direct_map) {
1569 		installed_mmu = mmu;
1570 		uma_zone_set_allocf(moea64_pvo_zone,moea64_uma_page_alloc);
1571 	}
1572 
1573 #ifdef COMPAT_FREEBSD32
1574 	elf32_nxstack = 1;
1575 #endif
1576 
1577 	moea64_initialized = TRUE;
1578 }
1579 
1580 boolean_t
1581 moea64_is_referenced(mmu_t mmu, vm_page_t m)
1582 {
1583 
1584 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1585 	    ("moea64_is_referenced: page %p is not managed", m));
1586 
1587 	return (moea64_query_bit(mmu, m, LPTE_REF));
1588 }
1589 
1590 boolean_t
1591 moea64_is_modified(mmu_t mmu, vm_page_t m)
1592 {
1593 
1594 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1595 	    ("moea64_is_modified: page %p is not managed", m));
1596 
1597 	/*
1598 	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
1599 	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
1600 	 * is clear, no PTEs can have LPTE_CHG set.
1601 	 */
1602 	VM_OBJECT_ASSERT_LOCKED(m->object);
1603 	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
1604 		return (FALSE);
1605 	return (moea64_query_bit(mmu, m, LPTE_CHG));
1606 }
1607 
1608 boolean_t
1609 moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1610 {
1611 	struct pvo_entry *pvo;
1612 	boolean_t rv = TRUE;
1613 
1614 	PMAP_LOCK(pmap);
1615 	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1616 	if (pvo != NULL)
1617 		rv = FALSE;
1618 	PMAP_UNLOCK(pmap);
1619 	return (rv);
1620 }
1621 
1622 void
1623 moea64_clear_modify(mmu_t mmu, vm_page_t m)
1624 {
1625 
1626 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1627 	    ("moea64_clear_modify: page %p is not managed", m));
1628 	VM_OBJECT_ASSERT_WLOCKED(m->object);
1629 	KASSERT(!vm_page_xbusied(m),
1630 	    ("moea64_clear_modify: page %p is exclusive busied", m));
1631 
1632 	/*
1633 	 * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG
1634 	 * set.  If the object containing the page is locked and the page is
1635 	 * not exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
1636 	 */
1637 	if ((m->aflags & PGA_WRITEABLE) == 0)
1638 		return;
1639 	moea64_clear_bit(mmu, m, LPTE_CHG);
1640 }
1641 
1642 /*
1643  * Clear the write and modified bits in each of the given page's mappings.
1644  */
1645 void
1646 moea64_remove_write(mmu_t mmu, vm_page_t m)
1647 {
1648 	struct	pvo_entry *pvo;
1649 	int64_t	refchg, ret;
1650 	pmap_t	pmap;
1651 
1652 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1653 	    ("moea64_remove_write: page %p is not managed", m));
1654 
1655 	/*
1656 	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
1657 	 * set by another thread while the object is locked.  Thus,
1658 	 * if PGA_WRITEABLE is clear, no page table entries need updating.
1659 	 */
1660 	VM_OBJECT_ASSERT_WLOCKED(m->object);
1661 	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
1662 		return;
1663 	powerpc_sync();
1664 	PV_PAGE_LOCK(m);
1665 	refchg = 0;
1666 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1667 		pmap = pvo->pvo_pmap;
1668 		PMAP_LOCK(pmap);
1669 		if (!(pvo->pvo_vaddr & PVO_DEAD) &&
1670 		    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
1671 			pvo->pvo_pte.prot &= ~VM_PROT_WRITE;
1672 			ret = MOEA64_PTE_REPLACE(mmu, pvo,
1673 			    MOEA64_PTE_PROT_UPDATE);
1674 			if (ret < 0)
1675 				ret = LPTE_CHG;
1676 			refchg |= ret;
1677 			if (pvo->pvo_pmap == kernel_pmap)
1678 				isync();
1679 		}
1680 		PMAP_UNLOCK(pmap);
1681 	}
1682 	if ((refchg | atomic_readandclear_32(&m->md.mdpg_attrs)) & LPTE_CHG)
1683 		vm_page_dirty(m);
1684 	vm_page_aflag_clear(m, PGA_WRITEABLE);
1685 	PV_PAGE_UNLOCK(m);
1686 }
1687 
1688 /*
1689  *	moea64_ts_referenced:
1690  *
1691  *	Return a count of reference bits for a page, clearing those bits.
1692  *	It is not necessary for every reference bit to be cleared, but it
1693  *	is necessary that 0 only be returned when there are truly no
1694  *	reference bits set.
1695  *
1696  *	XXX: The exact number of bits to check and clear is a matter that
1697  *	should be tested and standardized at some point in the future for
1698  *	optimal aging of shared pages.
1699  */
1700 int
1701 moea64_ts_referenced(mmu_t mmu, vm_page_t m)
1702 {
1703 
1704 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1705 	    ("moea64_ts_referenced: page %p is not managed", m));
1706 	return (moea64_clear_bit(mmu, m, LPTE_REF));
1707 }
1708 
1709 /*
1710  * Modify the WIMG settings of all mappings for a page.
1711  */
1712 void
1713 moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
1714 {
1715 	struct	pvo_entry *pvo;
1716 	int64_t	refchg;
1717 	pmap_t	pmap;
1718 	uint64_t lo;
1719 
1720 	if ((m->oflags & VPO_UNMANAGED) != 0) {
1721 		m->md.mdpg_cache_attrs = ma;
1722 		return;
1723 	}
1724 
1725 	lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
1726 
1727 	PV_PAGE_LOCK(m);
1728 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1729 		pmap = pvo->pvo_pmap;
1730 		PMAP_LOCK(pmap);
1731 		if (!(pvo->pvo_vaddr & PVO_DEAD)) {
1732 			pvo->pvo_pte.pa &= ~LPTE_WIMG;
1733 			pvo->pvo_pte.pa |= lo;
1734 			refchg = MOEA64_PTE_REPLACE(mmu, pvo,
1735 			    MOEA64_PTE_INVALIDATE);
1736 			if (refchg < 0)
1737 				refchg = (pvo->pvo_pte.prot & VM_PROT_WRITE) ?
1738 				    LPTE_CHG : 0;
1739 			if ((pvo->pvo_vaddr & PVO_MANAGED) &&
1740 			    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
1741 				refchg |=
1742 				    atomic_readandclear_32(&m->md.mdpg_attrs);
1743 				if (refchg & LPTE_CHG)
1744 					vm_page_dirty(m);
1745 				if (refchg & LPTE_REF)
1746 					vm_page_aflag_set(m, PGA_REFERENCED);
1747 			}
1748 			if (pvo->pvo_pmap == kernel_pmap)
1749 				isync();
1750 		}
1751 		PMAP_UNLOCK(pmap);
1752 	}
1753 	m->md.mdpg_cache_attrs = ma;
1754 	PV_PAGE_UNLOCK(m);
1755 }
1756 
1757 /*
1758  * Map a wired page into kernel virtual address space.
1759  */
1760 void
1761 moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
1762 {
1763 	int		error;
1764 	struct pvo_entry *pvo, *oldpvo;
1765 
1766 	pvo = alloc_pvo_entry(0);
1767 	pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
1768 	pvo->pvo_pte.pa = (pa & ~ADDR_POFF) | moea64_calc_wimg(pa, ma);
1769 	pvo->pvo_vaddr |= PVO_WIRED;
1770 
1771 	PMAP_LOCK(kernel_pmap);
1772 	oldpvo = moea64_pvo_find_va(kernel_pmap, va);
1773 	if (oldpvo != NULL)
1774 		moea64_pvo_remove_from_pmap(mmu, oldpvo);
1775 	init_pvo_entry(pvo, kernel_pmap, va);
1776 	error = moea64_pvo_enter(mmu, pvo, NULL);
1777 	PMAP_UNLOCK(kernel_pmap);
1778 
1779 	/* Free any dead pages */
1780 	if (oldpvo != NULL) {
1781 		PV_LOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
1782 		moea64_pvo_remove_from_page(mmu, oldpvo);
1783 		PV_UNLOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
1784 		free_pvo_entry(oldpvo);
1785 	}
1786 
1787 	if (error != 0 && error != ENOENT)
1788 		panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va,
1789 		    pa, error);
1790 }
1791 
1792 void
1793 moea64_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
1794 {
1795 
1796 	moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
1797 }
1798 
1799 /*
1800  * Extract the physical page address associated with the given kernel virtual
1801  * address.
1802  */
1803 vm_paddr_t
1804 moea64_kextract(mmu_t mmu, vm_offset_t va)
1805 {
1806 	struct		pvo_entry *pvo;
1807 	vm_paddr_t pa;
1808 
1809 	/*
1810 	 * Shortcut the direct-mapped case when applicable.  We never put
1811 	 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS.
1812 	 */
1813 	if (va < VM_MIN_KERNEL_ADDRESS)
1814 		return (va);
1815 
1816 	PMAP_LOCK(kernel_pmap);
1817 	pvo = moea64_pvo_find_va(kernel_pmap, va);
1818 	KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
1819 	    va));
1820 	pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va - PVO_VADDR(pvo));
1821 	PMAP_UNLOCK(kernel_pmap);
1822 	return (pa);
1823 }
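
/*
 * A minimal standalone sketch of the address arithmetic used above: the
 * physical page frame taken from the PTE is combined with the offset of
 * 'va' within its mapping.  PAGE_SIZE, the masks, and the values are
 * illustrative only.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define SK_PAGE_SIZE	4096UL
#define SK_PAGE_MASK	(SK_PAGE_SIZE - 1)

int
main(void)
{
	uint64_t pte_pa = 0x12345000;		/* page frame from the PTE */
	uint64_t pvo_va = 0xc000000000010000UL;	/* base VA of the mapping */
	uint64_t va = pvo_va + 0x123;		/* address being translated */
	uint64_t pa;

	/* Frame bits come from the PTE, offset bits from the virtual address. */
	pa = (pte_pa & ~SK_PAGE_MASK) | (va - pvo_va);
	printf("pa = %#jx\n", (uintmax_t)pa);	/* 0x12345123 */
	return (0);
}
#endif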
1824 
1825 /*
1826  * Remove a wired page from kernel virtual address space.
1827  */
1828 void
1829 moea64_kremove(mmu_t mmu, vm_offset_t va)
1830 {
1831 	moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
1832 }
1833 
1834 /*
1835  * Map a range of physical addresses into kernel virtual address space.
1836  *
1837  * The value passed in *virt is a suggested virtual address for the mapping.
1838  * Architectures which can support a direct-mapped physical to virtual region
1839  * can return the appropriate address within that region, leaving '*virt'
1840  * unchanged.  Other architectures should map the pages starting at '*virt' and
1841  * update '*virt' with the first usable address after the mapped region.
1842  */
1843 vm_offset_t
1844 moea64_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
1845     vm_paddr_t pa_end, int prot)
1846 {
1847 	vm_offset_t	sva, va;
1848 
1849 	if (hw_direct_map) {
1850 		/*
1851 		 * Check if every page in the region is covered by the direct
1852 		 * map.  The direct map covers all of physical memory, so use
1853 		 * moea64_calc_wimg() as a shortcut to check whether each page
1854 		 * lies in physical memory and is therefore covered by it.
1855 		 */
1856 		for (va = pa_start; va < pa_end; va += PAGE_SIZE)
1857 			if (moea64_calc_wimg(va, VM_MEMATTR_DEFAULT) != LPTE_M)
1858 				break;
1859 		if (va == pa_end)
1860 			return (pa_start);
1861 	}
1862 	sva = *virt;
1863 	va = sva;
1864 	/* XXX respect prot argument */
1865 	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
1866 		moea64_kenter(mmu, va, pa_start);
1867 	*virt = va;
1868 
1869 	return (sva);
1870 }
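
/*
 * A minimal standalone model of the '*virt' contract described above,
 * assuming a fictitious direct-map limit purely for illustration: when the
 * range can be served by a direct map the suggestion is left untouched,
 * otherwise the pages are "mapped" at *virt and *virt is advanced past them.
 * All names and constants here are placeholders.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define SK_PAGE_SIZE	4096UL
#define SK_DMAP_LIMIT	0x40000000UL	/* illustrative direct-map coverage */

static uintptr_t
sketch_map(uintptr_t *virt, uint64_t pa_start, uint64_t pa_end)
{
	uintptr_t sva, va;

	if (pa_end <= SK_DMAP_LIMIT)
		return ((uintptr_t)pa_start);	/* *virt is left unchanged */

	sva = va = *virt;
	for (; pa_start < pa_end; pa_start += SK_PAGE_SIZE, va += SK_PAGE_SIZE)
		;	/* a real implementation would enter a mapping here */
	*virt = va;	/* first usable address after the mapped region */
	return (sva);
}

int
main(void)
{
	uintptr_t virt = 0xe0000000UL;

	printf("%#lx\n", (unsigned long)sketch_map(&virt, 0x1000, 0x3000));
	printf("virt is still %#lx\n", (unsigned long)virt);
	printf("%#lx\n",
	    (unsigned long)sketch_map(&virt, 0x50000000UL, 0x50002000UL));
	printf("virt advanced to %#lx\n", (unsigned long)virt);
	return (0);
}
#endif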
1871 
1872 /*
1873  * Returns true if the pmap's pv is one of the first
1874  * 16 pvs linked to from this page.  This count may
1875  * be changed upwards or downwards in the future; it
1876  * is only necessary that true be returned for a small
1877  * subset of pmaps for proper page aging.
1878  */
1879 boolean_t
1880 moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
1881 {
1882 	int loops;
1883 	struct pvo_entry *pvo;
1884 	boolean_t rv;
1885 
1886 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1887 	    ("moea64_page_exists_quick: page %p is not managed", m));
1888 	loops = 0;
1889 	rv = FALSE;
1890 	PV_PAGE_LOCK(m);
1891 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1892 		if (!(pvo->pvo_vaddr & PVO_DEAD) && pvo->pvo_pmap == pmap) {
1893 			rv = TRUE;
1894 			break;
1895 		}
1896 		if (++loops >= 16)
1897 			break;
1898 	}
1899 	PV_PAGE_UNLOCK(m);
1900 	return (rv);
1901 }
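
/*
 * A minimal standalone sketch of the bounded scan above: only the first 16
 * entries are examined, so a miss does not prove the pmap has no mapping,
 * only that it is not among the first few.  The structures and names are
 * illustrative only.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>

struct sk_mapping {
	const void *owner;		/* stands in for pvo_pmap */
	struct sk_mapping *next;
};

static bool
sketch_exists_quick(const struct sk_mapping *head, const void *pmap)
{
	int loops = 0;

	for (; head != NULL; head = head->next) {
		if (head->owner == pmap)
			return (true);
		if (++loops >= 16)
			break;
	}
	return (false);
}

int
main(void)
{
	int pmap_a, pmap_b;
	struct sk_mapping m2 = { &pmap_b, NULL };
	struct sk_mapping m1 = { &pmap_a, &m2 };

	return (sketch_exists_quick(&m1, &pmap_a) ? 0 : 1);
}
#endif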
1902 
1903 void
1904 moea64_page_init(mmu_t mmu __unused, vm_page_t m)
1905 {
1906 
1907 	m->md.mdpg_attrs = 0;
1908 	m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT;
1909 	LIST_INIT(&m->md.mdpg_pvoh);
1910 }
1911 
1912 /*
1913  * Return the number of managed mappings to the given physical page
1914  * that are wired.
1915  */
1916 int
1917 moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
1918 {
1919 	struct pvo_entry *pvo;
1920 	int count;
1921 
1922 	count = 0;
1923 	if ((m->oflags & VPO_UNMANAGED) != 0)
1924 		return (count);
1925 	PV_PAGE_LOCK(m);
1926 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
1927 		if ((pvo->pvo_vaddr & (PVO_DEAD | PVO_WIRED)) == PVO_WIRED)
1928 			count++;
1929 	PV_PAGE_UNLOCK(m);
1930 	return (count);
1931 }
1932 
1933 static uintptr_t	moea64_vsidcontext;
1934 
1935 uintptr_t
1936 moea64_get_unique_vsid(void) {
1937 	u_int entropy;
1938 	register_t hash;
1939 	uint32_t mask;
1940 	int i;
1941 
1942 	entropy = 0;
1943 	__asm __volatile("mftb %0" : "=r"(entropy));
1944 
1945 	mtx_lock(&moea64_slb_mutex);
1946 	for (i = 0; i < NVSIDS; i += VSID_NBPW) {
1947 		u_int	n;
1948 
1949 		/*
1950 		 * Create a new value by multiplying by a prime and adding in
1951 		 * entropy from the timebase register.  This is to make the
1952 		 * VSID more random so that the PT hash function collides
1953 		 * less often.  (Note that the prime causes gcc to do shifts
1954 		 * instead of a multiply.)
1955 		 */
1956 		moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
1957 		hash = moea64_vsidcontext & (NVSIDS - 1);
1958 		if (hash == 0)		/* 0 is special, avoid it */
1959 			continue;
1960 		n = hash >> 5;
1961 		mask = 1 << (hash & (VSID_NBPW - 1));
1962 		hash = (moea64_vsidcontext & VSID_HASHMASK);
1963 		if (moea64_vsid_bitmap[n] & mask) {	/* collision? */
1964 			/* anything free in this bucket? */
1965 			if (moea64_vsid_bitmap[n] == 0xffffffff) {
1966 				entropy = (moea64_vsidcontext >> 20);
1967 				continue;
1968 			}
1969 			i = ffs(~moea64_vsid_bitmap[n]) - 1;
1970 			mask = 1 << i;
1971 			hash &= rounddown2(VSID_HASHMASK, VSID_NBPW);
1972 			hash |= i;
1973 		}
1974 		if (hash == VSID_VRMA)	/* also special, avoid this too */
1975 			continue;
1976 		KASSERT(!(moea64_vsid_bitmap[n] & mask),
1977 		    ("Allocating in-use VSID %#zx\n", hash));
1978 		moea64_vsid_bitmap[n] |= mask;
1979 		mtx_unlock(&moea64_slb_mutex);
1980 		return (hash);
1981 	}
1982 
1983 	mtx_unlock(&moea64_slb_mutex);
1984 	panic("%s: out of segments", __func__);
1985 }
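
/*
 * A minimal standalone sketch of the bitmap arithmetic above, assuming the
 * word width (VSID_NBPW) is 32 bits and using a small illustrative number of
 * VSIDs; the names and constants are placeholders.  sk_vsid_alloc() mirrors
 * the word-index/bit-mask computation, sk_vsid_free() mirrors
 * moea64_release_vsid().
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define SK_NVSIDS	1024		/* illustrative, power of two */
#define SK_NBPW		32		/* bits per bitmap word */

static uint32_t sk_bitmap[SK_NVSIDS / SK_NBPW];

/* Mark one candidate hash as allocated; returns 0 on collision. */
static int
sk_vsid_alloc(unsigned int hash)
{
	unsigned int n = hash / SK_NBPW;	/* word index ('hash >> 5') */
	uint32_t mask = 1u << (hash % SK_NBPW);	/* bit within that word */

	if (sk_bitmap[n] & mask)
		return (0);		/* collision: caller picks another hash */
	sk_bitmap[n] |= mask;
	return (1);
}

/* Release a previously allocated hash. */
static void
sk_vsid_free(unsigned int hash)
{
	sk_bitmap[hash / SK_NBPW] &= ~(1u << (hash % SK_NBPW));
}

int
main(void)
{
	printf("%d %d\n", sk_vsid_alloc(37), sk_vsid_alloc(37));	/* 1 0 */
	sk_vsid_free(37);
	printf("%d\n", sk_vsid_alloc(37));				/* 1 */
	return (0);
}
#endif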
1986 
1987 #ifdef __powerpc64__
1988 void
1989 moea64_pinit(mmu_t mmu, pmap_t pmap)
1990 {
1991 
1992 	RB_INIT(&pmap->pmap_pvo);
1993 
1994 	pmap->pm_slb_tree_root = slb_alloc_tree();
1995 	pmap->pm_slb = slb_alloc_user_cache();
1996 	pmap->pm_slb_len = 0;
1997 }
1998 #else
1999 void
2000 moea64_pinit(mmu_t mmu, pmap_t pmap)
2001 {
2002 	int	i;
2003 	uint32_t hash;
2004 
2005 	RB_INIT(&pmap->pmap_pvo);
2006 
2007 	if (pmap_bootstrapped)
2008 		pmap->pmap_phys = (pmap_t)moea64_kextract(mmu,
2009 		    (vm_offset_t)pmap);
2010 	else
2011 		pmap->pmap_phys = pmap;
2012 
2013 	/*
2014 	 * Allocate some segment registers for this pmap.
2015 	 */
2016 	hash = moea64_get_unique_vsid();
2017 
2018 	for (i = 0; i < 16; i++)
2019 		pmap->pm_sr[i] = VSID_MAKE(i, hash);
2020 
2021 	KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0"));
2022 }
2023 #endif
2024 
2025 /*
2026  * Initialize the pmap associated with process 0.
2027  */
2028 void
2029 moea64_pinit0(mmu_t mmu, pmap_t pm)
2030 {
2031 
2032 	PMAP_LOCK_INIT(pm);
2033 	moea64_pinit(mmu, pm);
2034 	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
2035 }
2036 
2037 /*
2038  * Set the physical protection on the specified range of this map as requested.
2039  */
2040 static void
2041 moea64_pvo_protect(mmu_t mmu, pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
2042 {
2043 	struct vm_page *pg;
2044 	vm_prot_t oldprot;
2045 	int32_t refchg;
2046 
2047 	PMAP_LOCK_ASSERT(pm, MA_OWNED);
2048 
2049 	/*
2050 	 * Change the protection of the page.
2051 	 */
2052 	oldprot = pvo->pvo_pte.prot;
2053 	pvo->pvo_pte.prot = prot;
2054 	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
2055 
2056 	/*
2057 	 * If the PVO is in the page table, update mapping
2058 	 */
2059 	refchg = MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_PROT_UPDATE);
2060 	if (refchg < 0)
2061 		refchg = (oldprot & VM_PROT_WRITE) ? LPTE_CHG : 0;
2062 
2063 	if (pm != kernel_pmap && pg != NULL && !(pg->aflags & PGA_EXECUTABLE) &&
2064 	    (pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
2065 		if ((pg->oflags & VPO_UNMANAGED) == 0)
2066 			vm_page_aflag_set(pg, PGA_EXECUTABLE);
2067 		moea64_syncicache(mmu, pm, PVO_VADDR(pvo),
2068 		    pvo->pvo_pte.pa & LPTE_RPGN, PAGE_SIZE);
2069 	}
2070 
2071 	/*
2072 	 * Update vm about the REF/CHG bits if the page is managed and we have
2073 	 * removed write access.
2074 	 */
2075 	if (pg != NULL && (pvo->pvo_vaddr & PVO_MANAGED) &&
2076 	    (oldprot & VM_PROT_WRITE)) {
2077 		refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs);
2078 		if (refchg & LPTE_CHG)
2079 			vm_page_dirty(pg);
2080 		if (refchg & LPTE_REF)
2081 			vm_page_aflag_set(pg, PGA_REFERENCED);
2082 	}
2083 }
2084 
2085 void
2086 moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
2087     vm_prot_t prot)
2088 {
2089 	struct	pvo_entry *pvo, *tpvo, key;
2090 
2091 	CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm,
2092 	    sva, eva, prot);
2093 
2094 	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
2095 	    ("moea64_protect: non current pmap"));
2096 
2097 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2098 		moea64_remove(mmu, pm, sva, eva);
2099 		return;
2100 	}
2101 
2102 	PMAP_LOCK(pm);
2103 	key.pvo_vaddr = sva;
2104 	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
2105 	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
2106 		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
2107 		moea64_pvo_protect(mmu, pm, pvo, prot);
2108 	}
2109 	PMAP_UNLOCK(pm);
2110 }
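
/*
 * A minimal standalone sketch of the range walk used above: RB_NFIND locates
 * the first entry at or after the start key, and the successor is fetched
 * with RB_NEXT before the current entry is touched, the same idiom used by
 * moea64_protect() and moea64_remove().  The tree and its contents are
 * illustrative only; <sys/tree.h> is the BSD red-black tree header.
 */
#if 0
#include <sys/tree.h>
#include <stdio.h>
#include <stdlib.h>

struct sk_node {
	RB_ENTRY(sk_node) entry;
	unsigned long va;
};

static int
sk_cmp(struct sk_node *a, struct sk_node *b)
{
	return ((a->va < b->va) ? -1 : (a->va > b->va));
}

RB_HEAD(sk_tree, sk_node);
RB_GENERATE_STATIC(sk_tree, sk_node, entry, sk_cmp);

int
main(void)
{
	struct sk_tree head = RB_INITIALIZER(&head);
	struct sk_node *n, *tn, key;
	unsigned long va;

	for (va = 0x1000; va <= 0x5000; va += 0x1000) {
		n = calloc(1, sizeof(*n));
		n->va = va;
		RB_INSERT(sk_tree, &head, n);
	}

	/* Visit every node with 0x2000 <= va < 0x5000. */
	key.va = 0x2000;
	for (n = RB_NFIND(sk_tree, &head, &key);
	    n != NULL && n->va < 0x5000; n = tn) {
		tn = RB_NEXT(sk_tree, &head, n);	/* save successor first */
		printf("visiting %#lx\n", n->va);
	}
	return (0);
}
#endif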
2111 
2112 /*
2113  * Map a list of wired pages into kernel virtual address space.  This is
2114  * intended for temporary mappings which do not need page modification or
2115  * references recorded.  Existing mappings in the region are overwritten.
2116  */
2117 void
2118 moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count)
2119 {
2120 	while (count-- > 0) {
2121 		moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
2122 		va += PAGE_SIZE;
2123 		m++;
2124 	}
2125 }
2126 
2127 /*
2128  * Remove page mappings from kernel virtual address space.  Intended for
2129  * temporary mappings entered by moea64_qenter.
2130  */
2131 void
2132 moea64_qremove(mmu_t mmu, vm_offset_t va, int count)
2133 {
2134 	while (count-- > 0) {
2135 		moea64_kremove(mmu, va);
2136 		va += PAGE_SIZE;
2137 	}
2138 }
2139 
2140 void
2141 moea64_release_vsid(uint64_t vsid)
2142 {
2143 	int idx, mask;
2144 
2145 	mtx_lock(&moea64_slb_mutex);
2146 	idx = vsid & (NVSIDS-1);
2147 	mask = 1 << (idx % VSID_NBPW);
2148 	idx /= VSID_NBPW;
2149 	KASSERT(moea64_vsid_bitmap[idx] & mask,
2150 	    ("Freeing unallocated VSID %#jx", vsid));
2151 	moea64_vsid_bitmap[idx] &= ~mask;
2152 	mtx_unlock(&moea64_slb_mutex);
2153 }
2154 
2155 
2156 void
2157 moea64_release(mmu_t mmu, pmap_t pmap)
2158 {
2159 
2160 	/*
2161 	 * Free segment registers' VSIDs
2162 	 */
2163 #ifdef __powerpc64__
2164 	slb_free_tree(pmap);
2165 	slb_free_user_cache(pmap->pm_slb);
2166 #else
2167 	KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0"));
2168 
2169 	moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0]));
2170 #endif
2171 }
2172 
2173 /*
2174  * Remove all pages mapped by the specified pmap
2175  */
2176 void
2177 moea64_remove_pages(mmu_t mmu, pmap_t pm)
2178 {
2179 	struct pvo_entry *pvo, *tpvo;
2180 	struct pvo_tree tofree;
2181 
2182 	RB_INIT(&tofree);
2183 
2184 	PMAP_LOCK(pm);
2185 	RB_FOREACH_SAFE(pvo, pvo_tree, &pm->pmap_pvo, tpvo) {
2186 		if (pvo->pvo_vaddr & PVO_WIRED)
2187 			continue;
2188 
2189 		/*
2190 		 * For locking reasons, remove this from the page table and
2191 		 * pmap, but save delinking from the vm_page for a second
2192 		 * pass
2193 		 */
2194 		moea64_pvo_remove_from_pmap(mmu, pvo);
2195 		RB_INSERT(pvo_tree, &tofree, pvo);
2196 	}
2197 	PMAP_UNLOCK(pm);
2198 
2199 	RB_FOREACH_SAFE(pvo, pvo_tree, &tofree, tpvo) {
2200 		PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN);
2201 		moea64_pvo_remove_from_page(mmu, pvo);
2202 		PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN);
2203 		RB_REMOVE(pvo_tree, &tofree, pvo);
2204 		free_pvo_entry(pvo);
2205 	}
2206 }
2207 
2208 /*
2209  * Remove the given range of addresses from the specified map.
2210  */
2211 void
2212 moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
2213 {
2214 	struct  pvo_entry *pvo, *tpvo, key;
2215 	struct pvo_tree tofree;
2216 
2217 	/*
2218 	 * Perform an unsynchronized read.  This is, however, safe.
2219 	 */
2220 	if (pm->pm_stats.resident_count == 0)
2221 		return;
2222 
2223 	key.pvo_vaddr = sva;
2224 
2225 	RB_INIT(&tofree);
2226 
2227 	PMAP_LOCK(pm);
2228 	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
2229 	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
2230 		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
2231 
2232 		/*
2233 		 * For locking reasons, remove this from the page table and
2234 		 * pmap, but save delinking from the vm_page for a second
2235 		 * pass
2236 		 */
2237 		moea64_pvo_remove_from_pmap(mmu, pvo);
2238 		RB_INSERT(pvo_tree, &tofree, pvo);
2239 	}
2240 	PMAP_UNLOCK(pm);
2241 
2242 	RB_FOREACH_SAFE(pvo, pvo_tree, &tofree, tpvo) {
2243 		PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN);
2244 		moea64_pvo_remove_from_page(mmu, pvo);
2245 		PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN);
2246 		RB_REMOVE(pvo_tree, &tofree, pvo);
2247 		free_pvo_entry(pvo);
2248 	}
2249 }
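
/*
 * A minimal standalone sketch of the two-pass pattern used by
 * moea64_remove_pages() and moea64_remove() above: entries are unhooked and
 * collected under one lock, then torn down and freed after that lock has
 * been dropped.  The "locks" are implied and the structures illustrative;
 * <sys/queue.h> provides the list macros.
 */
#if 0
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct sk_entry {
	LIST_ENTRY(sk_entry) link;
	int id;
};

LIST_HEAD(sk_list, sk_entry);

int
main(void)
{
	struct sk_list active = LIST_HEAD_INITIALIZER(active);
	struct sk_list tofree = LIST_HEAD_INITIALIZER(tofree);
	struct sk_entry *e, *tmp;
	int i;

	for (i = 0; i < 4; i++) {
		e = calloc(1, sizeof(*e));
		e->id = i;
		LIST_INSERT_HEAD(&active, e, link);
	}

	/* Pass 1: under the "pmap lock", move entries to a private list. */
	LIST_FOREACH_SAFE(e, &active, link, tmp) {
		LIST_REMOVE(e, link);
		LIST_INSERT_HEAD(&tofree, e, link);
	}

	/* Pass 2: with the "pmap lock" dropped, finish teardown and free. */
	LIST_FOREACH_SAFE(e, &tofree, link, tmp) {
		printf("freeing entry %d\n", e->id);
		LIST_REMOVE(e, link);
		free(e);
	}
	return (0);
}
#endif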
2250 
2251 /*
2252  * Remove physical page from all pmaps in which it resides.
2253  * moea64_pvo_remove_from_pmap() reflects PTE changes back to the vm_page.
2254  */
2255 void
2256 moea64_remove_all(mmu_t mmu, vm_page_t m)
2257 {
2258 	struct	pvo_entry *pvo, *next_pvo;
2259 	struct	pvo_head freequeue;
2260 	int	wasdead;
2261 	pmap_t	pmap;
2262 
2263 	LIST_INIT(&freequeue);
2264 
2265 	PV_PAGE_LOCK(m);
2266 	LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) {
2267 		pmap = pvo->pvo_pmap;
2268 		PMAP_LOCK(pmap);
2269 		wasdead = (pvo->pvo_vaddr & PVO_DEAD);
2270 		if (!wasdead)
2271 			moea64_pvo_remove_from_pmap(mmu, pvo);
2272 		moea64_pvo_remove_from_page(mmu, pvo);
2273 		if (!wasdead)
2274 			LIST_INSERT_HEAD(&freequeue, pvo, pvo_vlink);
2275 		PMAP_UNLOCK(pmap);
2276 
2277 	}
2278 	KASSERT(!pmap_page_is_mapped(m), ("Page still has mappings"));
2279 	KASSERT(!(m->aflags & PGA_WRITEABLE), ("Page still writable"));
2280 	PV_PAGE_UNLOCK(m);
2281 
2282 	/* Clean up UMA allocations */
2283 	LIST_FOREACH_SAFE(pvo, &freequeue, pvo_vlink, next_pvo)
2284 		free_pvo_entry(pvo);
2285 }
2286 
2287 /*
2288  * Allocate a physical page of memory directly from the phys_avail map.
2289  * Can only be called from moea64_bootstrap before avail start and end are
2290  * calculated.
2291  */
2292 vm_offset_t
2293 moea64_bootstrap_alloc(vm_size_t size, u_int align)
2294 {
2295 	vm_offset_t	s, e;
2296 	int		i, j;
2297 
2298 	size = round_page(size);
2299 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
2300 		if (align != 0)
2301 			s = roundup2(phys_avail[i], align);
2302 		else
2303 			s = phys_avail[i];
2304 		e = s + size;
2305 
2306 		if (s < phys_avail[i] || e > phys_avail[i + 1])
2307 			continue;
2308 
2309 		if (s + size > platform_real_maxaddr())
2310 			continue;
2311 
2312 		if (s == phys_avail[i]) {
2313 			phys_avail[i] += size;
2314 		} else if (e == phys_avail[i + 1]) {
2315 			phys_avail[i + 1] -= size;
2316 		} else {
2317 			for (j = phys_avail_count * 2; j > i; j -= 2) {
2318 				phys_avail[j] = phys_avail[j - 2];
2319 				phys_avail[j + 1] = phys_avail[j - 1];
2320 			}
2321 
2322 			phys_avail[i + 3] = phys_avail[i + 1];
2323 			phys_avail[i + 1] = s;
2324 			phys_avail[i + 2] = e;
2325 			phys_avail_count++;
2326 		}
2327 
2328 		return (s);
2329 	}
2330 	panic("moea64_bootstrap_alloc: could not allocate memory");
2331 }
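
/*
 * A minimal standalone model of carving an allocation out of a
 * phys_avail-style array of (start, end) pairs, as done above: the chosen
 * region either shrinks from the front, shrinks from the back, or is split
 * in two.  The array contents and sizes are illustrative only.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define SK_MAXPAIRS	8

static uint64_t sk_avail[SK_MAXPAIRS * 2] = { 0x1000, 0x9000 };
static int sk_avail_count = 1;

static uint64_t
sk_carve(uint64_t s, uint64_t size, int i)
{
	uint64_t e = s + size;
	int j;

	if (s == sk_avail[i]) {			/* carve from the front */
		sk_avail[i] += size;
	} else if (e == sk_avail[i + 1]) {	/* carve from the back */
		sk_avail[i + 1] -= size;
	} else {				/* split the region in two */
		for (j = sk_avail_count * 2; j > i; j -= 2) {
			sk_avail[j] = sk_avail[j - 2];
			sk_avail[j + 1] = sk_avail[j - 1];
		}
		sk_avail[i + 3] = sk_avail[i + 1];
		sk_avail[i + 1] = s;
		sk_avail[i + 2] = e;
		sk_avail_count++;
	}
	return (s);
}

int
main(void)
{
	int k;

	sk_carve(0x3000, 0x1000, 0);	/* middle: splits [0x1000, 0x9000) */
	for (k = 0; k < sk_avail_count * 2; k += 2)
		printf("[%#jx, %#jx)\n", (uintmax_t)sk_avail[k],
		    (uintmax_t)sk_avail[k + 1]);
	return (0);
}
#endif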
2332 
2333 static int
2334 moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo, struct pvo_head *pvo_head)
2335 {
2336 	int first = 0, err;
2337 
2338 	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
2339 	KASSERT(moea64_pvo_find_va(pvo->pvo_pmap, PVO_VADDR(pvo)) == NULL,
2340 	    ("Existing mapping for VA %#jx", (uintmax_t)PVO_VADDR(pvo)));
2341 
2342 	moea64_pvo_enter_calls++;
2343 
2344 	/*
2345 	 * Add to pmap list
2346 	 */
2347 	RB_INSERT(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
2348 
2349 	/*
2350 	 * Remember if the list was empty and therefore will be the first
2351 	 * item.
2352 	 */
2353 	if (pvo_head != NULL) {
2354 		if (LIST_FIRST(pvo_head) == NULL)
2355 			first = 1;
2356 		LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
2357 	}
2358 
2359 	if (pvo->pvo_vaddr & PVO_WIRED)
2360 		pvo->pvo_pmap->pm_stats.wired_count++;
2361 	pvo->pvo_pmap->pm_stats.resident_count++;
2362 
2363 	/*
2364 	 * Insert it into the hardware page table
2365 	 */
2366 	err = MOEA64_PTE_INSERT(mmu, pvo);
2367 	if (err != 0) {
2368 		panic("moea64_pvo_enter: overflow");
2369 	}
2370 
2371 	moea64_pvo_entries++;
2372 
2373 	if (pvo->pvo_pmap == kernel_pmap)
2374 		isync();
2375 
2376 #ifdef __powerpc64__
2377 	/*
2378 	 * Make sure all our bootstrap mappings are in the SLB as soon
2379 	 * as virtual memory is switched on.
2380 	 */
2381 	if (!pmap_bootstrapped)
2382 		moea64_bootstrap_slb_prefault(PVO_VADDR(pvo),
2383 		    pvo->pvo_vaddr & PVO_LARGE);
2384 #endif
2385 
2386 	return (first ? ENOENT : 0);
2387 }
2388 
2389 static void
2390 moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo)
2391 {
2392 	struct	vm_page *pg;
2393 	int32_t refchg;
2394 
2395 	KASSERT(pvo->pvo_pmap != NULL, ("Trying to remove PVO with no pmap"));
2396 	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
2397 	KASSERT(!(pvo->pvo_vaddr & PVO_DEAD), ("Trying to remove dead PVO"));
2398 
2399 	/*
2400 	 * If there is an active pte entry, we need to deactivate it
2401 	 */
2402 	refchg = MOEA64_PTE_UNSET(mmu, pvo);
2403 	if (refchg < 0) {
2404 		/*
2405 		 * If it was evicted from the page table, be pessimistic and
2406 		 * dirty the page.
2407 		 */
2408 		if (pvo->pvo_pte.prot & VM_PROT_WRITE)
2409 			refchg = LPTE_CHG;
2410 		else
2411 			refchg = 0;
2412 	}
2413 
2414 	/*
2415 	 * Update our statistics.
2416 	 */
2417 	pvo->pvo_pmap->pm_stats.resident_count--;
2418 	if (pvo->pvo_vaddr & PVO_WIRED)
2419 		pvo->pvo_pmap->pm_stats.wired_count--;
2420 
2421 	/*
2422 	 * Remove this PVO from the pmap list.
2423 	 */
2424 	RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
2425 
2426 	/*
2427 	 * Mark this for the next sweep
2428 	 */
2429 	pvo->pvo_vaddr |= PVO_DEAD;
2430 
2431 	/* Send RC bits to VM */
2432 	if ((pvo->pvo_vaddr & PVO_MANAGED) &&
2433 	    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
2434 		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
2435 		if (pg != NULL) {
2436 			refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs);
2437 			if (refchg & LPTE_CHG)
2438 				vm_page_dirty(pg);
2439 			if (refchg & LPTE_REF)
2440 				vm_page_aflag_set(pg, PGA_REFERENCED);
2441 		}
2442 	}
2443 }
2444 
2445 static void
2446 moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo)
2447 {
2448 	struct	vm_page *pg;
2449 
2450 	KASSERT(pvo->pvo_vaddr & PVO_DEAD, ("Trying to delink live page"));
2451 
2452 	/* Use NULL pmaps as a sentinel for races in page deletion */
2453 	if (pvo->pvo_pmap == NULL)
2454 		return;
2455 	pvo->pvo_pmap = NULL;
2456 
2457 	/*
2458 	 * Update vm about page writeability/executability if managed
2459 	 */
2460 	PV_LOCKASSERT(pvo->pvo_pte.pa & LPTE_RPGN);
2461 	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
2462 
2463 	if ((pvo->pvo_vaddr & PVO_MANAGED) && pg != NULL) {
2464 		LIST_REMOVE(pvo, pvo_vlink);
2465 		if (LIST_EMPTY(vm_page_to_pvoh(pg)))
2466 			vm_page_aflag_clear(pg, PGA_WRITEABLE | PGA_EXECUTABLE);
2467 	}
2468 
2469 	moea64_pvo_entries--;
2470 	moea64_pvo_remove_calls++;
2471 }
2472 
2473 static struct pvo_entry *
2474 moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
2475 {
2476 	struct pvo_entry key;
2477 
2478 	PMAP_LOCK_ASSERT(pm, MA_OWNED);
2479 
2480 	key.pvo_vaddr = va & ~ADDR_POFF;
2481 	return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key));
2482 }
2483 
2484 static boolean_t
2485 moea64_query_bit(mmu_t mmu, vm_page_t m, uint64_t ptebit)
2486 {
2487 	struct	pvo_entry *pvo;
2488 	int64_t ret;
2489 	boolean_t rv;
2490 
2491 	/*
2492 	 * See if this bit is stored in the page already.
2493 	 */
2494 	if (m->md.mdpg_attrs & ptebit)
2495 		return (TRUE);
2496 
2497 	/*
2498 	 * Examine each PTE.  Sync so that any pending REF/CHG bits are
2499 	 * flushed to the PTEs.
2500 	 */
2501 	rv = FALSE;
2502 	powerpc_sync();
2503 	PV_PAGE_LOCK(m);
2504 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2505 		ret = 0;
2506 
2507 		/*
2508 		 * See if this pvo has a valid PTE.  If so, fetch the
2509 		 * REF/CHG bits from the valid PTE.  If the appropriate
2510 		 * ptebit is set, return success.
2511 		 */
2512 		PMAP_LOCK(pvo->pvo_pmap);
2513 		if (!(pvo->pvo_vaddr & PVO_DEAD))
2514 			ret = MOEA64_PTE_SYNCH(mmu, pvo);
2515 		PMAP_UNLOCK(pvo->pvo_pmap);
2516 
2517 		if (ret > 0) {
2518 			atomic_set_32(&m->md.mdpg_attrs,
2519 			    ret & (LPTE_CHG | LPTE_REF));
2520 			if (ret & ptebit) {
2521 				rv = TRUE;
2522 				break;
2523 			}
2524 		}
2525 	}
2526 	PV_PAGE_UNLOCK(m);
2527 
2528 	return (rv);
2529 }
2530 
2531 static u_int
2532 moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
2533 {
2534 	u_int	count;
2535 	struct	pvo_entry *pvo;
2536 	int64_t ret;
2537 
2538 	/*
2539 	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
2540 	 * we can reset the right ones).
2541 	 */
2542 	powerpc_sync();
2543 
2544 	/*
2545 	 * For each pvo entry, clear the pte's ptebit.
2546 	 */
2547 	count = 0;
2548 	PV_PAGE_LOCK(m);
2549 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2550 		ret = 0;
2551 
2552 		PMAP_LOCK(pvo->pvo_pmap);
2553 		if (!(pvo->pvo_vaddr & PVO_DEAD))
2554 			ret = MOEA64_PTE_CLEAR(mmu, pvo, ptebit);
2555 		PMAP_UNLOCK(pvo->pvo_pmap);
2556 
2557 		if (ret > 0 && (ret & ptebit))
2558 			count++;
2559 	}
2560 	atomic_clear_32(&m->md.mdpg_attrs, ptebit);
2561 	PV_PAGE_UNLOCK(m);
2562 
2563 	return (count);
2564 }
2565 
2566 boolean_t
2567 moea64_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
2568 {
2569 	struct pvo_entry *pvo, key;
2570 	vm_offset_t ppa;
2571 	int error = 0;
2572 
2573 	PMAP_LOCK(kernel_pmap);
2574 	key.pvo_vaddr = ppa = pa & ~ADDR_POFF;
2575 	for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key);
2576 	    ppa < pa + size; ppa += PAGE_SIZE,
2577 	    pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) {
2578 		if (pvo == NULL || (pvo->pvo_pte.pa & LPTE_RPGN) != ppa) {
2579 			error = EFAULT;
2580 			break;
2581 		}
2582 	}
2583 	PMAP_UNLOCK(kernel_pmap);
2584 
2585 	return (error);
2586 }
2587 
2588 /*
2589  * Map a set of physical memory pages into the kernel virtual
2590  * address space. Return a pointer to where it is mapped. This
2591  * routine is intended to be used for mapping device memory,
2592  * NOT real memory.
2593  */
2594 void *
2595 moea64_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
2596 {
2597 	vm_offset_t va, tmpva, ppa, offset;
2598 
2599 	ppa = trunc_page(pa);
2600 	offset = pa & PAGE_MASK;
2601 	size = roundup2(offset + size, PAGE_SIZE);
2602 
2603 	va = kva_alloc(size);
2604 
2605 	if (!va)
2606 		panic("moea64_mapdev: Couldn't alloc kernel virtual memory");
2607 
2608 	for (tmpva = va; size > 0;) {
2609 		moea64_kenter_attr(mmu, tmpva, ppa, ma);
2610 		size -= PAGE_SIZE;
2611 		tmpva += PAGE_SIZE;
2612 		ppa += PAGE_SIZE;
2613 	}
2614 
2615 	return ((void *)(va + offset));
2616 }
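
/*
 * A minimal standalone sketch of the rounding arithmetic above: the start
 * address is truncated to a page boundary, the size grows to cover the
 * leading offset, and the caller gets back a pointer at the original offset
 * within the first page.  PAGE_SIZE here is an illustrative 4 KiB and the
 * helper names are placeholders.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define SK_PAGE_SIZE	4096UL
#define SK_PAGE_MASK	(SK_PAGE_SIZE - 1)
#define sk_trunc_page(x)	((x) & ~SK_PAGE_MASK)
#define sk_roundup2(x, y)	(((x) + ((y) - 1)) & ~((y) - 1))

int
main(void)
{
	uint64_t pa = 0x80001234;	/* unaligned device address */
	uint64_t size = 0x200;		/* requested length */
	uint64_t ppa, offset, mapsize;

	ppa = sk_trunc_page(pa);			/* 0x80001000 */
	offset = pa & SK_PAGE_MASK;			/* 0x234 */
	mapsize = sk_roundup2(offset + size, SK_PAGE_SIZE);	/* one page */

	printf("map %#jx bytes at %#jx, return base+%#jx\n",
	    (uintmax_t)mapsize, (uintmax_t)ppa, (uintmax_t)offset);
	return (0);
}
#endif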
2617 
2618 void *
2619 moea64_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
2620 {
2621 
2622 	return (moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
2623 }
2624 
2625 void
2626 moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
2627 {
2628 	vm_offset_t base, offset;
2629 
2630 	base = trunc_page(va);
2631 	offset = va & PAGE_MASK;
2632 	size = roundup2(offset + size, PAGE_SIZE);
2633 
2634 	kva_free(base, size);
2635 }
2636 
2637 void
2638 moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
2639 {
2640 	struct pvo_entry *pvo;
2641 	vm_offset_t lim;
2642 	vm_paddr_t pa;
2643 	vm_size_t len;
2644 
2645 	PMAP_LOCK(pm);
2646 	while (sz > 0) {
2647 		lim = round_page(va);
2648 		len = MIN(lim - va, sz);
2649 		pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
2650 		if (pvo != NULL && !(pvo->pvo_pte.pa & LPTE_I)) {
2651 			pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va & ADDR_POFF);
2652 			moea64_syncicache(mmu, pm, va, pa, len);
2653 		}
2654 		va += len;
2655 		sz -= len;
2656 	}
2657 	PMAP_UNLOCK(pm);
2658 }
2659 
2660 void
2661 moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
2662 {
2663 
2664 	*va = (void *)pa;
2665 }
2666 
2667 extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
2668 
2669 void
2670 moea64_scan_init(mmu_t mmu)
2671 {
2672 	struct pvo_entry *pvo;
2673 	vm_offset_t va;
2674 	int i;
2675 
2676 	if (!do_minidump) {
2677 		/* Initialize phys. segments for dumpsys(). */
2678 		memset(&dump_map, 0, sizeof(dump_map));
2679 		mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
2680 		for (i = 0; i < pregions_sz; i++) {
2681 			dump_map[i].pa_start = pregions[i].mr_start;
2682 			dump_map[i].pa_size = pregions[i].mr_size;
2683 		}
2684 		return;
2685 	}
2686 
2687 	/* Virtual segments for minidumps: */
2688 	memset(&dump_map, 0, sizeof(dump_map));
2689 
2690 	/* 1st: kernel .data and .bss. */
2691 	dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
2692 	dump_map[0].pa_size = round_page((uintptr_t)_end) -
2693 	    dump_map[0].pa_start;
2694 
2695 	/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
2696 	dump_map[1].pa_start = (vm_paddr_t)msgbufp->msg_ptr;
2697 	dump_map[1].pa_size = round_page(msgbufp->msg_size);
2698 
2699 	/* 3rd: kernel VM. */
2700 	va = dump_map[1].pa_start + dump_map[1].pa_size;
2701 	/* Find start of next chunk (from va). */
2702 	while (va < virtual_end) {
2703 		/* Don't dump the buffer cache. */
2704 		if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
2705 			va = kmi.buffer_eva;
2706 			continue;
2707 		}
2708 		pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF);
2709 		if (pvo != NULL && !(pvo->pvo_vaddr & PVO_DEAD))
2710 			break;
2711 		va += PAGE_SIZE;
2712 	}
2713 	if (va < virtual_end) {
2714 		dump_map[2].pa_start = va;
2715 		va += PAGE_SIZE;
2716 		/* Find last page in chunk. */
2717 		while (va < virtual_end) {
2718 			/* Don't run into the buffer cache. */
2719 			if (va == kmi.buffer_sva)
2720 				break;
2721 			pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF);
2722 			if (pvo != NULL && !(pvo->pvo_vaddr & PVO_DEAD))
2723 				break;
2724 			va += PAGE_SIZE;
2725 		}
2726 		dump_map[2].pa_size = va - dump_map[2].pa_start;
2727 	}
2728 }
2729 
2730