1 /*	$NetBSD: pmap.c,v 1.77 2010/11/12 07:59:27 uebayasi Exp $	*/
2 
3 /*-
4  * Copyright (c) 2002 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by UCHIYAMA Yasushi.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.77 2010/11/12 07:59:27 uebayasi Exp $");
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/pool.h>
38 #include <sys/msgbuf.h>
39 #include <sys/socketvar.h>	/* XXX: for sock_loan_thresh */
40 
41 #include <uvm/uvm.h>
42 
43 #include <sh3/mmu.h>
44 #include <sh3/cache.h>
45 
46 #ifdef DEBUG
47 #define	STATIC
48 #else
49 #define	STATIC	static
50 #endif
51 
52 #define	__PMAP_PTP_SHIFT	22
53 #define	__PMAP_PTP_TRUNC(va)						\
54 	(((va) + (1 << __PMAP_PTP_SHIFT) - 1) & ~((1 << __PMAP_PTP_SHIFT) - 1))
55 #define	__PMAP_PTP_PG_N		(PAGE_SIZE / sizeof(pt_entry_t))
56 #define	__PMAP_PTP_INDEX(va)	(((va) >> __PMAP_PTP_SHIFT) & (__PMAP_PTP_N - 1))
57 #define	__PMAP_PTP_OFSET(va)	(((va) >> PGSHIFT) & (__PMAP_PTP_PG_N - 1))
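
/*
 * Illustrative example (not from the original source; assumes 4KB pages
 * and 4-byte PTEs, i.e. __PMAP_PTP_PG_N == 1024):
 *
 *	va = 0x12345678
 *	__PMAP_PTP_INDEX(va) == (va >> 22) & (__PMAP_PTP_N - 1) == 0x48
 *		-> which page table page within pm_ptp[]
 *	__PMAP_PTP_OFSET(va) == (va >> PGSHIFT) & 0x3ff == 0x345
 *		-> which pt_entry_t slot within that page table page
 */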
58 
59 struct pmap __pmap_kernel;
60 struct pmap *const kernel_pmap_ptr = &__pmap_kernel;
61 STATIC vaddr_t __pmap_kve;	/* current end of kernel virtual space */
62 paddr_t avail_start;		/* PA of first available physical page */
63 paddr_t avail_end;		/* PA of last available physical page */
64 
65 /* For the fast tlb miss handler */
66 pt_entry_t **curptd;		/* p1 va of curlwp->...->pm_ptp */
67 
68 /* pmap pool */
69 STATIC struct pool __pmap_pmap_pool;
70 
71 /* pv_entry ops. */
72 struct pv_entry {
73 	struct pmap *pv_pmap;
74 	vaddr_t pv_va;
75 	SLIST_ENTRY(pv_entry) pv_link;
76 };
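/*
 * Each managed page keeps an SLIST of pv_entry in its vm_page_md, one
 * per (pmap, va) mapping of that physical page; the list is walked by
 * pmap_page_protect() and the modified/referenced bit handlers.
 */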
77 #define	__pmap_pv_alloc()	pool_get(&__pmap_pv_pool, PR_NOWAIT)
78 #define	__pmap_pv_free(pv)	pool_put(&__pmap_pv_pool, (pv))
79 STATIC void __pmap_pv_enter(pmap_t, struct vm_page *, vaddr_t);
80 STATIC void __pmap_pv_remove(pmap_t, struct vm_page *, vaddr_t);
81 STATIC void *__pmap_pv_page_alloc(struct pool *, int);
82 STATIC void __pmap_pv_page_free(struct pool *, void *);
83 STATIC struct pool __pmap_pv_pool;
84 STATIC struct pool_allocator pmap_pv_page_allocator = {
85 	__pmap_pv_page_alloc, __pmap_pv_page_free, 0,
86 };
87 
88 /* ASID ops. */
89 STATIC int __pmap_asid_alloc(void);
90 STATIC void __pmap_asid_free(int);
91 STATIC struct {
92 	uint32_t map[8];
93 	int hint;	/* hint for next allocation */
94 } __pmap_asid;
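
/*
 * The bitmap above tracks the 256 hardware ASIDs: bit (n & 31) of
 * map[n >> 5] is set while ASID n is in use.  ASID 0 is reserved for
 * the kernel (see __pmap_asid_alloc()/__pmap_asid_free()).  For example,
 * ASID 37 is recorded in bit 5 of map[1].
 */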
95 
96 /* page table entry ops. */
97 STATIC pt_entry_t *__pmap_pte_alloc(pmap_t, vaddr_t);
98 
99 /* pmap_enter util */
100 STATIC bool __pmap_map_change(pmap_t, vaddr_t, paddr_t, vm_prot_t,
101     pt_entry_t);
102 
103 void
104 pmap_bootstrap(void)
105 {
106 
107 	/* Steal msgbuf area */
108 	initmsgbuf((void *)uvm_pageboot_alloc(MSGBUFSIZE), MSGBUFSIZE);
109 
110 	avail_start = ptoa(VM_PHYSMEM_PTR(0)->start);
111 	avail_end = ptoa(VM_PHYSMEM_PTR(vm_nphysseg - 1)->end);
112 	__pmap_kve = VM_MIN_KERNEL_ADDRESS;
113 
114 	pmap_kernel()->pm_refcnt = 1;
115 	pmap_kernel()->pm_ptp = (pt_entry_t **)uvm_pageboot_alloc(PAGE_SIZE);
116 	memset(pmap_kernel()->pm_ptp, 0, PAGE_SIZE);
117 
118 	/* Enable MMU */
119 	sh_mmu_start();
120 	/* Mask all interrupts */
121 	_cpu_intr_suspend();
122 	/* Enable exceptions for P3 access */
123 	_cpu_exception_resume(0);
124 }
125 
126 vaddr_t
127 pmap_steal_memory(vsize_t size, vaddr_t *vstart, vaddr_t *vend)
128 {
129 	struct vm_physseg *bank;
130 	int i, j, npage;
131 	paddr_t pa;
132 	vaddr_t va;
133 
134 	KDASSERT(!uvm.page_init_done);
135 
136 	size = round_page(size);
137 	npage = atop(size);
138 
139 	bank = NULL;
140 	for (i = 0; i < vm_nphysseg; i++) {
141 		bank = VM_PHYSMEM_PTR(i);
142 		if (npage <= bank->avail_end - bank->avail_start)
143 			break;
144 	}
145 	KDASSERT(i != vm_nphysseg);
146 	KDASSERT(bank != NULL);
147 
148 	/* Steal pages */
149 	pa = ptoa(bank->avail_start);
150 	bank->avail_start += npage;
151 	bank->start += npage;
152 
153 	/* GC memory bank */
154 	if (bank->avail_start == bank->end) {
155 		/* Remove this segment from the list. */
156 		vm_nphysseg--;
157 		KDASSERT(vm_nphysseg > 0);
158 		for (j = i; j < vm_nphysseg; j++)
159 			VM_PHYSMEM_PTR_SWAP(j, j + 1);
160 	}
161 
162 	va = SH3_PHYS_TO_P1SEG(pa);
163 	memset((void *)va, 0, size);
164 
165 	return (va);
166 }
167 
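/*
 * vaddr_t pmap_growkernel(vaddr_t maxkvaddr):
 *	Grow kernel virtual address space to cover maxkvaddr.  KVA is
 *	covered in (1 << __PMAP_PTP_SHIFT) == 4MB chunks, one page table
 *	page per chunk.
 */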
168 vaddr_t
169 pmap_growkernel(vaddr_t maxkvaddr)
170 {
171 	int i, n;
172 
173 	if (maxkvaddr <= __pmap_kve)
174 		return (__pmap_kve);
175 
176 	i = __PMAP_PTP_INDEX(__pmap_kve - VM_MIN_KERNEL_ADDRESS);
177 	__pmap_kve = __PMAP_PTP_TRUNC(maxkvaddr);
178 	n = __PMAP_PTP_INDEX(__pmap_kve - VM_MIN_KERNEL_ADDRESS);
179 
180 	/* Allocate page table pages */
181 	for (; i < n; i++) {
182 		if (__pmap_kernel.pm_ptp[i] != NULL)
183 			continue;
184 
185 		if (uvm.page_init_done) {
186 			struct vm_page *pg = uvm_pagealloc(NULL, 0, NULL,
187 			    UVM_PGA_USERESERVE | UVM_PGA_ZERO);
188 			if (pg == NULL)
189 				goto error;
190 			__pmap_kernel.pm_ptp[i] = (pt_entry_t *)
191 			    SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg));
192 		} else {
193 			pt_entry_t *ptp = (pt_entry_t *)
194 			    uvm_pageboot_alloc(PAGE_SIZE);
195 			if (ptp == NULL)
196 				goto error;
197 			__pmap_kernel.pm_ptp[i] = ptp;
198 			memset(ptp, 0, PAGE_SIZE);
199 		}
200 	}
201 
202 	return (__pmap_kve);
203  error:
204 	panic("pmap_growkernel: out of memory.");
205 	/* NOTREACHED */
206 }
207 
208 void
209 pmap_virtual_space(vaddr_t *start, vaddr_t *end)
210 {
211 
212 	*start = VM_MIN_KERNEL_ADDRESS;
213 	*end = VM_MAX_KERNEL_ADDRESS;
214 }
215 
216 void
217 pmap_init(void)
218 {
219 
220 	/* Initialize pmap module */
221 	pool_init(&__pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
222 	    &pool_allocator_nointr, IPL_NONE);
223 	pool_init(&__pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
224 	    &pmap_pv_page_allocator, IPL_NONE);
225 	pool_setlowat(&__pmap_pv_pool, 16);
226 
227 #ifdef SH4
228 	if (SH_HAS_VIRTUAL_ALIAS) {
229 		/*
230 		 * XXX
231 		 * Disable sosend_loan() in src/sys/kern/uipc_socket.c
232 		 * on SH4 to avoid possible virtual cache aliases and
233 		 * unnecessary map/unmap thrashing in __pmap_pv_enter().
234 		 * (also see comments in __pmap_pv_enter())
235 		 *
236 		 * Ideally, read-only shared mappings won't cause aliases,
237 		 * so __pmap_pv_enter() should handle shared read-only
238 		 * mappings the way the ARM pmap does.
239 		 */
240 		sock_loan_thresh = -1;
241 	}
242 #endif
243 }
244 
245 pmap_t
246 pmap_create(void)
247 {
248 	pmap_t pmap;
249 
250 	pmap = pool_get(&__pmap_pmap_pool, PR_WAITOK);
251 	memset(pmap, 0, sizeof(struct pmap));
252 	pmap->pm_asid = -1;
253 	pmap->pm_refcnt = 1;
254 	/* Allocate page table page holder (512 slots) */
255 	pmap->pm_ptp = (pt_entry_t **)
256 	    SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(
257 		    uvm_pagealloc(NULL, 0, NULL,
258 			UVM_PGA_USERESERVE | UVM_PGA_ZERO)));
259 
260 	return (pmap);
261 }
262 
263 void
264 pmap_destroy(pmap_t pmap)
265 {
266 	int i;
267 
268 	if (--pmap->pm_refcnt > 0)
269 		return;
270 
271 	/* Deallocate all page table pages */
272 	for (i = 0; i < __PMAP_PTP_N; i++) {
273 		vaddr_t va = (vaddr_t)pmap->pm_ptp[i];
274 		if (va == 0)
275 			continue;
276 #ifdef DEBUG	/* Check no mapping exists. */
277 		{
278 			int j;
279 			pt_entry_t *pte = (pt_entry_t *)va;
280 			for (j = 0; j < __PMAP_PTP_PG_N; j++, pte++)
281 				KDASSERT(*pte == 0);
282 		}
283 #endif /* DEBUG */
284 		/* Purge cache entry for next use of this page. */
285 		if (SH_HAS_VIRTUAL_ALIAS)
286 			sh_dcache_inv_range(va, PAGE_SIZE);
287 		/* Free page table */
288 		uvm_pagefree(PHYS_TO_VM_PAGE(SH3_P1SEG_TO_PHYS(va)));
289 	}
290 	/* Deallocate page table page holder */
291 	if (SH_HAS_VIRTUAL_ALIAS)
292 		sh_dcache_inv_range((vaddr_t)pmap->pm_ptp, PAGE_SIZE);
293 	uvm_pagefree(PHYS_TO_VM_PAGE(SH3_P1SEG_TO_PHYS((vaddr_t)pmap->pm_ptp)));
294 
295 	/* Free ASID */
296 	__pmap_asid_free(pmap->pm_asid);
297 
298 	pool_put(&__pmap_pmap_pool, pmap);
299 }
300 
301 void
302 pmap_reference(pmap_t pmap)
303 {
304 
305 	pmap->pm_refcnt++;
306 }
307 
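/*
 * void pmap_activate(struct lwp *l):
 *	Make l's pmap current: lazily allocate an ASID if the pmap has
 *	none (its ASID may have been stolen by __pmap_asid_alloc()), load
 *	it into the MMU, and point the TLB miss handler at the pmap's
 *	page table directory via curptd.
 */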
308 void
309 pmap_activate(struct lwp *l)
310 {
311 	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
312 
313 	if (pmap->pm_asid == -1)
314 		pmap->pm_asid = __pmap_asid_alloc();
315 
316 	KDASSERT(pmap->pm_asid >= 0 && pmap->pm_asid < 256);
317 
318 	sh_tlb_set_asid(pmap->pm_asid);
319 	curptd = pmap->pm_ptp;
320 }
321 
322 void
323 pmap_deactivate(struct lwp *l)
324 {
325 
326 	/* Nothing to do */
327 }
328 
329 int
330 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
331 {
332 	struct vm_page *pg;
333 	struct vm_page_md *pvh;
334 	pt_entry_t entry, *pte;
335 	bool kva = (pmap == pmap_kernel());
336 
337 	/* "flags" never exceeds "prot" */
338 	KDASSERT(prot != 0 && ((flags & VM_PROT_ALL) & ~prot) == 0);
339 
340 	pg = PHYS_TO_VM_PAGE(pa);
341 	entry = (pa & PG_PPN) | PG_4K;
342 	if (flags & PMAP_WIRED)
343 		entry |= _PG_WIRED;
344 
345 	if (pg != NULL) {	/* memory-space */
346 		pvh = VM_PAGE_TO_MD(pg);
347 		entry |= PG_C;	/* always cached */
348 
349 		/* Seed modified/reference tracking */
350 		if (flags & VM_PROT_WRITE) {
351 			entry |= PG_V | PG_D;
352 			pvh->pvh_flags |= PVH_MODIFIED | PVH_REFERENCED;
353 		} else if (flags & VM_PROT_ALL) {
354 			entry |= PG_V;
355 			pvh->pvh_flags |= PVH_REFERENCED;
356 		}
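		/*
		 * Mappings entered without PG_V (or PG_D) take a TLB miss
		 * (or initial-write fault) on first use; __pmap_pte_load()
		 * then sets the missing bit and records
		 * PVH_REFERENCED/PVH_MODIFIED, emulating the
		 * referenced/modified bits.
		 */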
357 
358 		/* Protection */
359 		if ((prot & VM_PROT_WRITE) && (pvh->pvh_flags & PVH_MODIFIED)) {
360 			if (kva)
361 				entry |= PG_PR_KRW | PG_SH;
362 			else
363 				entry |= PG_PR_URW;
364 		} else {
365 			/* RO or COW page */
366 			if (kva)
367 				entry |= PG_PR_KRO | PG_SH;
368 			else
369 				entry |= PG_PR_URO;
370 		}
371 
372 		/* Check for existing mapping */
373 		if (__pmap_map_change(pmap, va, pa, prot, entry))
374 			return (0);
375 
376 		/* Add to physical-virtual map list of this page */
377 		__pmap_pv_enter(pmap, pg, va);
378 
379 	} else {	/* bus-space (always uncached map) */
380 		if (kva) {
381 			entry |= PG_V | PG_SH |
382 			    ((prot & VM_PROT_WRITE) ?
383 			    (PG_PR_KRW | PG_D) : PG_PR_KRO);
384 		} else {
385 			entry |= PG_V |
386 			    ((prot & VM_PROT_WRITE) ?
387 			    (PG_PR_URW | PG_D) : PG_PR_URO);
388 		}
389 	}
390 
391 	/* Register to page table */
392 	if (kva)
393 		pte = __pmap_kpte_lookup(va);
394 	else {
395 		pte = __pmap_pte_alloc(pmap, va);
396 		if (pte == NULL) {
397 			if (flags & PMAP_CANFAIL)
398 				return ENOMEM;
399 			panic("pmap_enter: cannot allocate pte");
400 		}
401 	}
402 
403 	*pte = entry;
404 
405 	if (pmap->pm_asid != -1)
406 		sh_tlb_update(pmap->pm_asid, va, entry);
407 
408 	if (!SH_HAS_UNIFIED_CACHE &&
409 	    (prot == (VM_PROT_READ | VM_PROT_EXECUTE)))
410 		sh_icache_sync_range_index(va, PAGE_SIZE);
411 
412 	if (entry & _PG_WIRED)
413 		pmap->pm_stats.wired_count++;
414 	pmap->pm_stats.resident_count++;
415 
416 	return (0);
417 }
418 
419 /*
420  * bool __pmap_map_change(pmap_t pmap, vaddr_t va, paddr_t pa,
421  *     vm_prot_t prot, pt_entry_t entry):
422  *	Handle the case where pmap_enter() is called for a virtual
423  *	address that already has a mapping.  Returns true if the mapping
424  *	was updated in place, false if the caller must enter a new one.
425  */
426 bool
427 __pmap_map_change(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
428     pt_entry_t entry)
429 {
430 	pt_entry_t *pte, oentry;
431 	vaddr_t eva = va + PAGE_SIZE;
432 
433 	if ((pte = __pmap_pte_lookup(pmap, va)) == NULL ||
434 	    ((oentry = *pte) == 0))
435 		return (false);		/* no mapping exists. */
436 
437 	if (pa != (oentry & PG_PPN)) {
438 		/* Mapping to a different physical page exists; remove it. */
439 		pmap_remove(pmap, va, eva);
440 		return (false);
441 	}
442 
443 	/* Pre-existing mapping */
444 
445 	/* Protection change. */
446 	if ((oentry & PG_PR_MASK) != (entry & PG_PR_MASK))
447 		pmap_protect(pmap, va, eva, prot);
448 
449 	/* Wired change */
450 	if (oentry & _PG_WIRED) {
451 		if (!(entry & _PG_WIRED)) {
452 			/* wired -> unwired */
453 			*pte = entry;
454 			/* "wired" is a software bit; no TLB update needed */
455 			pmap->pm_stats.wired_count--;
456 		}
457 	} else if (entry & _PG_WIRED) {
458 		/* unwired -> wired; remove and re-enter to reflect "flags" */
459 		pmap_remove(pmap, va, eva);
460 		return (false);
461 	}
462 
463 	return (true);	/* mapping was changed. */
464 }
465 
466 /*
467  * void __pmap_pv_enter(pmap_t pmap, struct vm_page *pg, vaddr_t vaddr):
468  *	Insert a physical-virtual mapping entry for vm_page.
469  *	Assumes any pre-existing mapping has already been removed.
470  */
471 void
472 __pmap_pv_enter(pmap_t pmap, struct vm_page *pg, vaddr_t va)
473 {
474 	struct vm_page_md *pvh;
475 	struct pv_entry *pv;
476 	int s;
477 
478 	s = splvm();
479 	if (SH_HAS_VIRTUAL_ALIAS) {
480 		/*
481 		 * Remove all other mappings on this physical page
482 		 * which have different virtual cache indexes to
483 		 * avoid virtual cache aliases.
484 		 *
485 		 * XXX We should also handle shared mappings which
486 		 * XXX have different virtual cache indexes by
487 		 * XXX mapping them uncached (like arm and mips do).
488 		 */
489  again:
490 		pvh = VM_PAGE_TO_MD(pg);
491 		SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
492 			if (sh_cache_indexof(va) !=
493 			    sh_cache_indexof(pv->pv_va)) {
494 				pmap_remove(pv->pv_pmap, pv->pv_va,
495 				    pv->pv_va + PAGE_SIZE);
496 				goto again;
497 			}
498 		}
499 	}
500 
501 	/* Register pv map */
502 	pvh = VM_PAGE_TO_MD(pg);
503 	pv = __pmap_pv_alloc();
504 	pv->pv_pmap = pmap;
505 	pv->pv_va = va;
506 
507 	SLIST_INSERT_HEAD(&pvh->pvh_head, pv, pv_link);
508 	splx(s);
509 }
510 
511 void
512 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
513 {
514 	struct vm_page *pg;
515 	pt_entry_t *pte, entry;
516 	vaddr_t va;
517 
518 	KDASSERT((sva & PGOFSET) == 0);
519 
520 	for (va = sva; va < eva; va += PAGE_SIZE) {
521 		if ((pte = __pmap_pte_lookup(pmap, va)) == NULL ||
522 		    (entry = *pte) == 0)
523 			continue;
524 
525 		if ((pg = PHYS_TO_VM_PAGE(entry & PG_PPN)) != NULL)
526 			__pmap_pv_remove(pmap, pg, va);
527 
528 		if (entry & _PG_WIRED)
529 			pmap->pm_stats.wired_count--;
530 		pmap->pm_stats.resident_count--;
531 		*pte = 0;
532 
533 		/*
534 		 * When pmap->pm_asid == -1 (invalid ASID), any old TLB entries
535 		 * for this pmap have already been invalidated (see pmap_activate()).
536 		 */
537 		if (pmap->pm_asid != -1)
538 			sh_tlb_invalidate_addr(pmap->pm_asid, va);
539 	}
540 }
541 
542 /*
543  * void __pmap_pv_remove(pmap_t pmap, struct vm_page *pg, vaddr_t vaddr):
544  *	Remove physical-virtual map from vm_page.
545  */
546 void
547 __pmap_pv_remove(pmap_t pmap, struct vm_page *pg, vaddr_t vaddr)
548 {
549 	struct vm_page_md *pvh;
550 	struct pv_entry *pv;
551 	int s;
552 
553 	s = splvm();
554 	pvh = VM_PAGE_TO_MD(pg);
555 	SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
556 		if (pv->pv_pmap == pmap && pv->pv_va == vaddr) {
557 			if (SH_HAS_VIRTUAL_ALIAS ||
558 			    (SH_HAS_WRITEBACK_CACHE &&
559 				(pvh->pvh_flags & PVH_MODIFIED))) {
560 				/*
561 				 * Always use index ops so we don't have to
562 				 * worry about which address space pv->pv_va is in.
563 				 */
564 				sh_dcache_wbinv_range_index
565 				    (pv->pv_va, PAGE_SIZE);
566 			}
567 
568 			SLIST_REMOVE(&pvh->pvh_head, pv, pv_entry, pv_link);
569 			__pmap_pv_free(pv);
570 			break;
571 		}
572 	}
573 #ifdef DEBUG
574 	/* Check for duplicate mappings. */
575 	SLIST_FOREACH(pv, &pvh->pvh_head, pv_link)
576 	    KDASSERT(!(pv->pv_pmap == pmap && pv->pv_va == vaddr));
577 #endif
578 	splx(s);
579 }
580 
581 void
582 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
583 {
584 	pt_entry_t *pte, entry;
585 
586 	KDASSERT((va & PGOFSET) == 0);
587 	KDASSERT(va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS);
588 
589 	entry = (pa & PG_PPN) | PG_V | PG_SH | PG_4K;
590 	if (prot & VM_PROT_WRITE)
591 		entry |= (PG_PR_KRW | PG_D);
592 	else
593 		entry |= PG_PR_KRO;
594 
595 	if (PHYS_TO_VM_PAGE(pa))
596 		entry |= PG_C;
597 
598 	pte = __pmap_kpte_lookup(va);
599 
600 	KDASSERT(*pte == 0);
601 	*pte = entry;
602 
603 	sh_tlb_update(0, va, entry);
604 }
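
/*
 * Illustrative usage sketch (not from this file; assumes the standard
 * MI uvm_km_alloc()/uvm_km_free() interfaces):
 *
 *	va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_VAONLY);
 *	pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
 *	...use the mapping...
 *	pmap_kremove(va, PAGE_SIZE);
 *	uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_VAONLY);
 */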
605 
606 void
607 pmap_kremove(vaddr_t va, vsize_t len)
608 {
609 	pt_entry_t *pte;
610 	vaddr_t eva = va + len;
611 
612 	KDASSERT((va & PGOFSET) == 0);
613 	KDASSERT((len & PGOFSET) == 0);
614 	KDASSERT(va >= VM_MIN_KERNEL_ADDRESS && eva <= VM_MAX_KERNEL_ADDRESS);
615 
616 	for (; va < eva; va += PAGE_SIZE) {
617 		pte = __pmap_kpte_lookup(va);
618 		KDASSERT(pte != NULL);
619 		if (*pte == 0)
620 			continue;
621 
622 		if (SH_HAS_VIRTUAL_ALIAS && PHYS_TO_VM_PAGE(*pte & PG_PPN))
623 			sh_dcache_wbinv_range(va, PAGE_SIZE);
624 		*pte = 0;
625 
626 		sh_tlb_invalidate_addr(0, va);
627 	}
628 }
629 
630 bool
631 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
632 {
633 	pt_entry_t *pte;
634 
635 	/* P1/P2 (va >> 30 == 2) map physical memory directly */
636 	if (pmap == pmap_kernel() && (va >> 30) == 2) {
637 		if (pap != NULL)
638 			*pap = va & SH3_PHYS_MASK;
639 		return (true);
640 	}
641 
642 	pte = __pmap_pte_lookup(pmap, va);
643 	if (pte == NULL || *pte == 0)
644 		return (false);
645 
646 	if (pap != NULL)
647 		*pap = (*pte & PG_PPN) | (va & PGOFSET);
648 
649 	return (true);
650 }
651 
652 void
653 pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
654 {
655 	bool kernel = pmap == pmap_kernel();
656 	pt_entry_t *pte, entry, protbits;
657 	vaddr_t va;
658 
659 	sva = trunc_page(sva);
660 
661 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
662 		pmap_remove(pmap, sva, eva);
663 		return;
664 	}
665 
666 	switch (prot) {
667 	default:
668 		panic("pmap_protect: invalid protection mode %x", prot);
669 		/* NOTREACHED */
670 	case VM_PROT_READ:
671 		/* FALLTHROUGH */
672 	case VM_PROT_READ | VM_PROT_EXECUTE:
673 		protbits = kernel ? PG_PR_KRO : PG_PR_URO;
674 		break;
675 	case VM_PROT_READ | VM_PROT_WRITE:
676 		/* FALLTHROUGH */
677 	case VM_PROT_ALL:
678 		protbits = kernel ? PG_PR_KRW : PG_PR_URW;
679 		break;
680 	}
681 
682 	for (va = sva; va < eva; va += PAGE_SIZE) {
683 
684 		if (((pte = __pmap_pte_lookup(pmap, va)) == NULL) ||
685 		    (entry = *pte) == 0)
686 			continue;
687 
688 		if (SH_HAS_VIRTUAL_ALIAS && (entry & PG_D)) {
689 			if (!SH_HAS_UNIFIED_CACHE && (prot & VM_PROT_EXECUTE))
690 				sh_icache_sync_range_index(va, PAGE_SIZE);
691 			else
692 				sh_dcache_wbinv_range_index(va, PAGE_SIZE);
693 		}
694 
695 		entry = (entry & ~PG_PR_MASK) | protbits;
696 		*pte = entry;
697 
698 		if (pmap->pm_asid != -1)
699 			sh_tlb_update(pmap->pm_asid, va, entry);
700 	}
701 }
702 
703 void
704 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
705 {
706 	struct vm_page_md *pvh = VM_PAGE_TO_MD(pg);
707 	struct pv_entry *pv;
708 	struct pmap *pmap;
709 	vaddr_t va;
710 	int s;
711 
712 	switch (prot) {
713 	case VM_PROT_READ | VM_PROT_WRITE:
714 		/* FALLTHROUGH */
715 	case VM_PROT_ALL:
716 		break;
717 
718 	case VM_PROT_READ:
719 		/* FALLTHROUGH */
720 	case VM_PROT_READ | VM_PROT_EXECUTE:
721 		s = splvm();
722 		SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
723 			pmap = pv->pv_pmap;
724 			va = pv->pv_va;
725 
726 			KDASSERT(pmap);
727 			pmap_protect(pmap, va, va + PAGE_SIZE, prot);
728 		}
729 		splx(s);
730 		break;
731 
732 	default:
733 		/* Remove all */
734 		s = splvm();
735 		while ((pv = SLIST_FIRST(&pvh->pvh_head)) != NULL) {
736 			va = pv->pv_va;
737 			pmap_remove(pv->pv_pmap, va, va + PAGE_SIZE);
738 		}
739 		splx(s);
740 	}
741 }
742 
743 void
744 pmap_unwire(pmap_t pmap, vaddr_t va)
745 {
746 	pt_entry_t *pte, entry;
747 
748 	if ((pte = __pmap_pte_lookup(pmap, va)) == NULL ||
749 	    (entry = *pte) == 0 ||
750 	    (entry & _PG_WIRED) == 0)
751 		return;
752 
753 	*pte = entry & ~_PG_WIRED;
754 	pmap->pm_stats.wired_count--;
755 }
756 
757 void
758 pmap_procwr(struct proc	*p, vaddr_t va, size_t len)
759 {
760 
761 	if (!SH_HAS_UNIFIED_CACHE)
762 		sh_icache_sync_range_index(va, len);
763 }
764 
765 void
766 pmap_zero_page(paddr_t phys)
767 {
768 
769 	if (SH_HAS_VIRTUAL_ALIAS) {	/* don't pollute the cache */
770 		/* sync cache since we access via P2. */
771 		sh_dcache_wbinv_all();
772 		memset((void *)SH3_PHYS_TO_P2SEG(phys), 0, PAGE_SIZE);
773 	} else {
774 		memset((void *)SH3_PHYS_TO_P1SEG(phys), 0, PAGE_SIZE);
775 	}
776 }
777 
778 void
779 pmap_copy_page(paddr_t src, paddr_t dst)
780 {
781 
782 	if (SH_HAS_VIRTUAL_ALIAS) {	/* don't pollute the cache */
783 		/* sync cache since we access via P2. */
784 		sh_dcache_wbinv_all();
785 		memcpy((void *)SH3_PHYS_TO_P2SEG(dst),
786 		    (void *)SH3_PHYS_TO_P2SEG(src), PAGE_SIZE);
787 	} else {
788 		memcpy((void *)SH3_PHYS_TO_P1SEG(dst),
789 		    (void *)SH3_PHYS_TO_P1SEG(src), PAGE_SIZE);
790 	}
791 }
792 
793 bool
794 pmap_is_referenced(struct vm_page *pg)
795 {
796 	struct vm_page_md *pvh = VM_PAGE_TO_MD(pg);
797 
798 	return ((pvh->pvh_flags & PVH_REFERENCED) ? true : false);
799 }
800 
801 bool
802 pmap_clear_reference(struct vm_page *pg)
803 {
804 	struct vm_page_md *pvh = VM_PAGE_TO_MD(pg);
805 	struct pv_entry *pv;
806 	pt_entry_t *pte;
807 	pmap_t pmap;
808 	vaddr_t va;
809 	int s;
810 
811 	if ((pvh->pvh_flags & PVH_REFERENCED) == 0)
812 		return (false);
813 
814 	pvh->pvh_flags &= ~PVH_REFERENCED;
815 
816 	s = splvm();
817 	/* Restart reference bit emulation */
818 	SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
819 		pmap = pv->pv_pmap;
820 		va = pv->pv_va;
821 
822 		if ((pte = __pmap_pte_lookup(pmap, va)) == NULL)
823 			continue;
824 		if ((*pte & PG_V) == 0)
825 			continue;
826 		*pte &= ~PG_V;
827 
828 		if (pmap->pm_asid != -1)
829 			sh_tlb_invalidate_addr(pmap->pm_asid, va);
830 	}
831 	splx(s);
832 
833 	return (true);
834 }
835 
836 bool
837 pmap_is_modified(struct vm_page *pg)
838 {
839 	struct vm_page_md *pvh = VM_PAGE_TO_MD(pg);
840 
841 	return ((pvh->pvh_flags & PVH_MODIFIED) ? true : false);
842 }
843 
844 bool
845 pmap_clear_modify(struct vm_page *pg)
846 {
847 	struct vm_page_md *pvh = VM_PAGE_TO_MD(pg);
848 	struct pv_entry *pv;
849 	struct pmap *pmap;
850 	pt_entry_t *pte, entry;
851 	bool modified;
852 	vaddr_t va;
853 	int s;
854 
855 	modified = pvh->pvh_flags & PVH_MODIFIED;
856 	if (!modified)
857 		return (false);
858 
859 	pvh->pvh_flags &= ~PVH_MODIFIED;
860 
861 	s = splvm();
862 	if (SLIST_EMPTY(&pvh->pvh_head)) {/* no map on this page */
863 		splx(s);
864 		return (true);
865 	}
866 
867 	/* Write back the cache and invalidate TLB entries */
868 	if (!SH_HAS_VIRTUAL_ALIAS && SH_HAS_WRITEBACK_CACHE)
869 		sh_dcache_wbinv_all();
870 
871 	SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
872 		pmap = pv->pv_pmap;
873 		va = pv->pv_va;
874 		if ((pte = __pmap_pte_lookup(pmap, va)) == NULL)
875 			continue;
876 		entry = *pte;
877 		if ((entry & PG_D) == 0)
878 			continue;
879 
880 		if (SH_HAS_VIRTUAL_ALIAS)
881 			sh_dcache_wbinv_range_index(va, PAGE_SIZE);
882 
883 		*pte = entry & ~PG_D;
884 		if (pmap->pm_asid != -1)
885 			sh_tlb_invalidate_addr(pmap->pm_asid, va);
886 	}
887 	splx(s);
888 
889 	return (true);
890 }
891 
892 paddr_t
893 pmap_phys_address(paddr_t cookie)
894 {
895 
896 	return (sh3_ptob(cookie));
897 }
898 
899 #ifdef SH4
900 /*
901  * pmap_prefer(vaddr_t foff, vaddr_t *vap)
902  *
903  * Find first virtual address >= *vap that doesn't cause
904  * a virtual cache alias against vaddr_t foff.
905  */
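/*
 * Illustrative example (assuming, for illustration only, a 16KB
 * virtually indexed cache with sh_cache_prefer_mask == 0x3fff):
 *
 *	foff = 0x3000, *vap = 0x10001000
 *	(foff - va) & 0x3fff == 0x2000, so *vap becomes 0x10003000,
 *	which has the same cache index as foff.
 */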
906 void
907 pmap_prefer(vaddr_t foff, vaddr_t *vap)
908 {
909 	vaddr_t va;
910 
911 	if (SH_HAS_VIRTUAL_ALIAS) {
912 		va = *vap;
913 
914 		*vap = va + ((foff - va) & sh_cache_prefer_mask);
915 	}
916 }
917 #endif /* SH4 */
918 
919 /*
920  * pv_entry pool allocator:
921  *	void *__pmap_pv_page_alloc(struct pool *pool, int flags):
922  *	void __pmap_pv_page_free(struct pool *pool, void *v):
923  */
924 void *
925 __pmap_pv_page_alloc(struct pool *pool, int flags)
926 {
927 	struct vm_page *pg;
928 
929 	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
930 	if (pg == NULL)
931 		return (NULL);
932 
933 	return ((void *)SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg)));
934 }
935 
936 void
937 __pmap_pv_page_free(struct pool *pool, void *v)
938 {
939 	vaddr_t va = (vaddr_t)v;
940 
941 	/* Invalidate cache for next use of this page */
942 	if (SH_HAS_VIRTUAL_ALIAS)
943 		sh_icache_sync_range_index(va, PAGE_SIZE);
944 	uvm_pagefree(PHYS_TO_VM_PAGE(SH3_P1SEG_TO_PHYS(va)));
945 }
946 
947 /*
948  * pt_entry_t *__pmap_pte_alloc(pmap_t pmap, vaddr_t va):
949  *	Look up the page table entry for va, allocating the page table
950  *	page if necessary.  Page tables are accessed via P1.
951  */
952 pt_entry_t *
953 __pmap_pte_alloc(pmap_t pmap, vaddr_t va)
954 {
955 	struct vm_page *pg;
956 	pt_entry_t *ptp, *pte;
957 
958 	if ((pte = __pmap_pte_lookup(pmap, va)) != NULL)
959 		return (pte);
960 
961 	/* Allocate page table (not managed page) */
962 	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE | UVM_PGA_ZERO);
963 	if (pg == NULL)
964 		return NULL;
965 
966 	ptp = (pt_entry_t *)SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg));
967 	pmap->pm_ptp[__PMAP_PTP_INDEX(va)] = ptp;
968 
969 	return (ptp + __PMAP_PTP_OFSET(va));
970 }
971 
972 /*
973  * pt_entry_t *__pmap_pte_lookup(pmap_t pmap, vaddr_t va):
974  *	Look up the page table entry; returns NULL if not allocated.
975  */
976 pt_entry_t *
977 __pmap_pte_lookup(pmap_t pmap, vaddr_t va)
978 {
979 	pt_entry_t *ptp;
980 
981 	if (pmap == pmap_kernel())
982 		return (__pmap_kpte_lookup(va));
983 
984 	/* Lookup page table page */
985 	ptp = pmap->pm_ptp[__PMAP_PTP_INDEX(va)];
986 	if (ptp == NULL)
987 		return (NULL);
988 
989 	return (ptp + __PMAP_PTP_OFSET(va));
990 }
991 
992 /*
993  * pt_entry_t *__pmap_kpte_lookup(vaddr_t va):
994  *	kernel virtual only version of __pmap_pte_lookup().
995  */
996 pt_entry_t *
997 __pmap_kpte_lookup(vaddr_t va)
998 {
999 	pt_entry_t *ptp;
1000 
1001 	ptp = __pmap_kernel.pm_ptp[__PMAP_PTP_INDEX(va-VM_MIN_KERNEL_ADDRESS)];
1002 	if (ptp == NULL)
1003 		return NULL;
1004 
1005 	return (ptp + __PMAP_PTP_OFSET(va));
1006 }
1007 
1008 /*
1009  * bool __pmap_pte_load(pmap_t pmap, vaddr_t va, int flags):
1010  *	Look up the page table entry for va; if found, load it into the TLB.
1011  *	flags specifies whether to emulate the referenced and/or modified bits.
1012  */
1013 bool
1014 __pmap_pte_load(pmap_t pmap, vaddr_t va, int flags)
1015 {
1016 	struct vm_page *pg;
1017 	pt_entry_t *pte;
1018 	pt_entry_t entry;
1019 
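	/* Kernel VAs (P1/P2/P3) have the sign bit set; user VAs do not. */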
1020 	KDASSERT((((int)va < 0) && (pmap == pmap_kernel())) ||
1021 	    (((int)va >= 0) && (pmap != pmap_kernel())));
1022 
1023 	/* Lookup page table entry */
1024 	if (((pte = __pmap_pte_lookup(pmap, va)) == NULL) ||
1025 	    ((entry = *pte) == 0))
1026 		return (false);
1027 
1028 	KDASSERT(va != 0);
1029 
1030 	/* Emulate reference/modified tracking for managed page. */
1031 	if (flags != 0 && (pg = PHYS_TO_VM_PAGE(entry & PG_PPN)) != NULL) {
1032 		struct vm_page_md *pvh = VM_PAGE_TO_MD(pg);
1033 
1034 		if (flags & PVH_REFERENCED) {
1035 			pvh->pvh_flags |= PVH_REFERENCED;
1036 			entry |= PG_V;
1037 		}
1038 		if (flags & PVH_MODIFIED) {
1039 			pvh->pvh_flags |= PVH_MODIFIED;
1040 			entry |= PG_D;
1041 		}
1042 		*pte = entry;
1043 	}
1044 
1045 	/* When pmap has valid ASID, register to TLB */
1046 	if (pmap->pm_asid != -1)
1047 		sh_tlb_update(pmap->pm_asid, va, entry);
1048 
1049 	return (true);
1050 }
1051 
1052 /*
1053  * int __pmap_asid_alloc(void):
1054  *	Allocate a new ASID; if all are in use, steal one from another process.
1055  */
1056 int
1057 __pmap_asid_alloc(void)
1058 {
1059 	struct proc *p;
1060 	int i, j, k, n, map, asid;
1061 
1062 	/* Search free ASID */
1063 	i = __pmap_asid.hint >> 5;
1064 	n = i + 8;
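	/* Scan all 8 bitmap words once, starting at the hint's word. */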
1065 	for (; i < n; i++) {
1066 		k = i & 0x7;
1067 		map = __pmap_asid.map[k];
1068 		for (j = 0; j < 32; j++) {
1069 			if ((map & (1 << j)) == 0 && (k + j) != 0) {
1070 				__pmap_asid.map[k] |= (1 << j);
1071 				__pmap_asid.hint = (k << 5) + j;
1072 				return (__pmap_asid.hint);
1073 			}
1074 		}
1075 	}
1076 
1077 	/* Steal ASID */
1078 	LIST_FOREACH(p, &allproc, p_list) {
1079 		if ((asid = p->p_vmspace->vm_map.pmap->pm_asid) > 0) {
1080 			pmap_t pmap = p->p_vmspace->vm_map.pmap;
1081 			pmap->pm_asid = -1;
1082 			__pmap_asid.hint = asid;
1083 			/* Invalidate all TLB entries for the stolen ASID */
1084 			sh_tlb_invalidate_asid(asid);
1085 
1086 			return (__pmap_asid.hint);
1087 		}
1088 	}
1089 
1090 	panic("No ASID allocated.");
1091 	/* NOTREACHED */
1092 }
1093 
1094 /*
1095  * void __pmap_asid_free(int asid):
1096  *	Return an unused ASID to the pool and invalidate all its TLB entries.
1097  */
1098 void
1099 __pmap_asid_free(int asid)
1100 {
1101 	int i;
1102 
1103 	if (asid < 1)	/* Don't invalidate kernel ASID 0 */
1104 		return;
1105 
1106 	sh_tlb_invalidate_asid(asid);
1107 
1108 	i = asid >> 5;
1109 	__pmap_asid.map[i] &= ~(1 << (asid - (i << 5)));
1110 }
1111