xref: /netbsd/sys/arch/sh3/sh3/pmap.c (revision bf9ec67e)
1 /*	$NetBSD: pmap.c,v 1.40 2002/05/09 12:28:08 uch Exp $	*/
2 
3 /*-
4  * Copyright (c) 2002 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by UCHIYAMA Yasushi.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *        This product includes software developed by the NetBSD
21  *        Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/pool.h>
42 #include <sys/msgbuf.h>
43 
44 #include <uvm/uvm.h>
45 
46 #include <sh3/mmu.h>
47 #include <sh3/cache.h>
48 
49 #ifdef DEBUG
50 #define	STATIC
51 #else
52 #define	STATIC	static
53 #endif
54 
55 #define	__PMAP_PTP_SHIFT	22
56 #define	__PMAP_PTP_TRUNC(va)						\
57 	(((va) + (1 << __PMAP_PTP_SHIFT) - 1) & ~((1 << __PMAP_PTP_SHIFT) - 1))
58 #define	__PMAP_PTP_PG_N		(NBPG / sizeof(pt_entry_t))
59 #define	__PMAP_PTP_INDEX(va)	(((va) >> __PMAP_PTP_SHIFT) & (__PMAP_PTP_N - 1))
60 #define	__PMAP_PTP_OFSET(va)	(((va) >> PGSHIFT) & (__PMAP_PTP_PG_N - 1))
61 
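/*
 * Address decomposition (illustrative example, assuming 4KB pages and
 * 4-byte PTEs, so __PMAP_PTP_PG_N == 1024):
 *	va 0x00c02000:	PTP index = 0x00c02000 >> 22          = 3
 *			PTE index = (0x00c02000 >> 12) & 0x3ff = 2
 *	i.e. the pte lives at pm_ptp[3][2].
 */
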
62 struct pmap __pmap_kernel;
63 STATIC vaddr_t __pmap_kve;	/* VA of last kernel virtual address */
64 paddr_t avail_start;		/* PA of first available physical page */
65 paddr_t avail_end;		/* PA of last available physical page */
66 
67 /* pmap pool */
68 STATIC struct pool __pmap_pmap_pool;
69 
70 /* pv_entry ops. */
71 struct pv_entry {
72 	struct pmap *pv_pmap;
73 	vaddr_t pv_va;
74 	SLIST_ENTRY(pv_entry) pv_link;
75 };
76 #define	__pmap_pv_alloc()	pool_get(&__pmap_pv_pool, PR_NOWAIT)
77 #define	__pmap_pv_free(pv)	pool_put(&__pmap_pv_pool, (pv))
78 STATIC void __pmap_pv_enter(pmap_t, struct vm_page *, vaddr_t);
79 STATIC void __pmap_pv_remove(pmap_t, struct vm_page *, vaddr_t);
80 STATIC void *__pmap_pv_page_alloc(struct pool *, int);
81 STATIC void __pmap_pv_page_free(struct pool *, void *);
82 STATIC struct pool __pmap_pv_pool;
83 STATIC struct pool_allocator pmap_pv_page_allocator = {
84 	__pmap_pv_page_alloc, __pmap_pv_page_free, 0,
85 };
86 
87 /* ASID ops. */
88 STATIC int __pmap_asid_alloc(void);
89 STATIC void __pmap_asid_free(int);
90 STATIC struct {
91 	u_int32_t map[8];
92 	int hint;	/* hint for next allocation */
93 } __pmap_asid;
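/*
 * The map[] above is a 256-bit bitmap (8 x 32-bit words), one bit per
 * ASID; ASID 0 is reserved for the kernel and is never handed out.
 */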
94 
95 /* page table entry ops. */
96 STATIC pt_entry_t *__pmap_pte_alloc(pmap_t, vaddr_t);
97 
98 /* pmap_enter util */
99 STATIC boolean_t __pmap_map_change(pmap_t, vaddr_t, paddr_t, vm_prot_t,
100     pt_entry_t);
101 
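/*
 * void pmap_bootstrap(void):
 *	Bootstrap the pmap module: steal the msgbuf and the system data
 *	structure space from physical memory, record the available
 *	physical range, initialize the pmap and pv_entry pools, allocate
 *	the kernel page table page directory, and enable the MMU.
 */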
102 void
103 pmap_bootstrap()
104 {
105 	size_t sz;
106 	caddr_t v;
107 
108 	/* Steal msgbuf area */
109 	initmsgbuf((caddr_t)uvm_pageboot_alloc(MSGBUFSIZE), MSGBUFSIZE);
110 
111 	/* Allocate space for system data structures. */
112 	sz = (size_t)allocsys(NULL, NULL);
113 	v = (caddr_t)uvm_pageboot_alloc(sz);
114 	if ((allocsys(v, NULL) - v) != sz)
115 		panic("pmap_bootstrap: table size inconsistency");
116 
117 	avail_start = ptoa(vm_physmem[0].start);
118 	avail_end = ptoa(vm_physmem[vm_nphysseg - 1].end);
119 	__pmap_kve = VM_MIN_KERNEL_ADDRESS;
120 
121 	/* Initialize pmap module */
122 	pool_init(&__pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
123 	    &pool_allocator_nointr);
124 	pool_init(&__pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
125 	    &pmap_pv_page_allocator);
126 	pool_setlowat(&__pmap_pv_pool, 16);
127 
128 	pmap_kernel()->pm_refcnt = 1;
129 	pmap_kernel()->pm_ptp = (pt_entry_t **)uvm_pageboot_alloc(PAGE_SIZE);
130 	memset(pmap_kernel()->pm_ptp, 0, PAGE_SIZE);
131 
132 	/* Enable MMU */
133 	sh_mmu_start();
134 	/* Mask all interrupts */
135 	_cpu_intr_suspend();
136 	/* Enable exception for P3 access */
137 	_cpu_exception_resume(0);
138 }
139 
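/*
 * vaddr_t pmap_steal_memory(vsize_t size, vaddr_t *vstart, vaddr_t *vend):
 *	Steal physical pages from a vm_physmem bank before UVM page
 *	initialization is done and return the P1 virtual address of the
 *	zeroed region.
 */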
140 vaddr_t
141 pmap_steal_memory(vsize_t size, vaddr_t *vstart, vaddr_t *vend)
142 {
143 	struct vm_physseg *bank;
144 	int i, j, npage;
145 	paddr_t pa;
146 	vaddr_t va;
147 
148 	KDASSERT(!uvm.page_init_done);
149 
150 	size = round_page(size);
151 	npage = atop(size);
152 
153 	for (i = 0, bank = &vm_physmem[i]; i < vm_nphysseg; i++, bank++)
154 		if (npage <= bank->avail_end - bank->avail_start)
155 			break;
156 	KDASSERT(i != vm_nphysseg);
157 
158 	/* Steal pages */
159 	pa = ptoa(bank->avail_start);
160 	bank->avail_start += npage;
161 	bank->start += npage;
162 
163 	/* GC memory bank */
164 	if (bank->avail_start == bank->end) {
165 		/* Remove this segment from the list. */
166 		vm_nphysseg--;
167 		KDASSERT(vm_nphysseg > 0);
168 		for (j = i; j < vm_nphysseg; j++)
169 			vm_physmem[j] = vm_physmem[j + 1];
170 	}
171 
172 	va = SH3_PHYS_TO_P1SEG(pa);
173 	memset((caddr_t)va, 0, size);
174 
175 	return (va);
176 }
177 
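/*
 * vaddr_t pmap_growkernel(vaddr_t maxkvaddr):
 *	Grow kernel virtual address space to at least maxkvaddr by
 *	allocating kernel page table pages; returns the new end of
 *	kernel virtual address space.
 */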
178 vaddr_t
179 pmap_growkernel(vaddr_t maxkvaddr)
180 {
181 	int i, n;
182 
183 	if (maxkvaddr <= __pmap_kve)
184 		return (__pmap_kve);
185 
186 	i = __PMAP_PTP_INDEX(__pmap_kve - VM_MIN_KERNEL_ADDRESS);
187 	__pmap_kve = __PMAP_PTP_TRUNC(maxkvaddr);
188 	n = __PMAP_PTP_INDEX(__pmap_kve - VM_MIN_KERNEL_ADDRESS);
189 
190 	/* Allocate page table pages */
191 	for (; i < n; i++) {
192 		if (__pmap_kernel.pm_ptp[i] != NULL)
193 			continue;
194 
195 		if (uvm.page_init_done) {
196 			struct vm_page *pg = uvm_pagealloc(NULL, 0, NULL,
197 			    UVM_PGA_USERESERVE | UVM_PGA_ZERO);
198 			if (pg == NULL)
199 				goto error;
200 			__pmap_kernel.pm_ptp[i] = (pt_entry_t *)
201 			    SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg));
202 		} else {
203 			pt_entry_t *ptp = (pt_entry_t *)
204 			    uvm_pageboot_alloc(PAGE_SIZE);
205 			if (ptp == NULL)
206 				goto error;
207 			__pmap_kernel.pm_ptp[i] = ptp;
208 			memset(ptp, 0, PAGE_SIZE);
209 		}
210 	}
211 
212 	return (__pmap_kve);
213  error:
214 	panic("pmap_growkernel: out of memory.");
215 }
216 
217 void
218 pmap_virtual_space(vaddr_t *start, vaddr_t *end)
219 {
220 
221 	*start = VM_MIN_KERNEL_ADDRESS;
222 	*end = VM_MAX_KERNEL_ADDRESS;
223 }
224 
225 void
226 pmap_init()
227 {
228 
229 	/* Nothing to do */
230 }
231 
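/*
 * pmap_t pmap_create(void):
 *	Create an empty user pmap with reference count 1, no ASID yet,
 *	and a zeroed page table page holder.
 */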
232 pmap_t
233 pmap_create()
234 {
235 	pmap_t pmap;
236 
237 	pmap = pool_get(&__pmap_pmap_pool, PR_WAITOK);
238 	memset(pmap, 0, sizeof(struct pmap));
239 	pmap->pm_asid = -1;
240 	pmap->pm_refcnt = 1;
241 	/* Allocate page table page holder (512 slots) */
242 	pmap->pm_ptp = (pt_entry_t **)
243 	    SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(
244 		    uvm_pagealloc(NULL, 0, NULL,
245 			UVM_PGA_USERESERVE | UVM_PGA_ZERO)));
246 
247 	return (pmap);
248 }
249 
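/*
 * void pmap_destroy(pmap_t pmap):
 *	Drop a reference to the pmap; on the last reference free its page
 *	table pages, the page table page holder, and its ASID.
 */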
250 void
251 pmap_destroy(pmap_t pmap)
252 {
253 	int i;
254 
255 	if (--pmap->pm_refcnt > 0)
256 		return;
257 
258 	/* Deallocate all page table pages */
259 	for (i = 0; i < __PMAP_PTP_N; i++) {
260 		vaddr_t va = (vaddr_t)pmap->pm_ptp[i];
261 		if (va == 0)
262 			continue;
263 #ifdef DEBUG	/* Check no mapping exists. */
264 		{
265 			int j;
266 			pt_entry_t *pte = (pt_entry_t *)va;
267 			for (j = 0; j < __PMAP_PTP_PG_N; j++, pte++)
268 				KDASSERT(*pte == 0);
269 		}
270 #endif /* DEBUG */
271 		/* Purge cache entry for next use of this page. */
272 		if (SH_HAS_VIRTUAL_ALIAS)
273 			sh_dcache_inv_range(va, PAGE_SIZE);
274 		/* Free page table */
275 		uvm_pagefree(PHYS_TO_VM_PAGE(SH3_P1SEG_TO_PHYS(va)));
276 	}
277 	/* Deallocate page table page holder */
278 	if (SH_HAS_VIRTUAL_ALIAS)
279 		sh_dcache_inv_range((vaddr_t)pmap->pm_ptp, PAGE_SIZE);
280 	uvm_pagefree(PHYS_TO_VM_PAGE(SH3_P1SEG_TO_PHYS((vaddr_t)pmap->pm_ptp)));
281 
282 	/* Free ASID */
283 	__pmap_asid_free(pmap->pm_asid);
284 
285 	pool_put(&__pmap_pmap_pool, pmap);
286 }
287 
288 void
289 pmap_reference(pmap_t pmap)
290 {
291 
292 	pmap->pm_refcnt++;
293 }
294 
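/*
 * void pmap_activate(struct proc *p):
 *	Activate the address space of process p: allocate an ASID for its
 *	pmap if it has none and load that ASID into the MMU.
 */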
295 void
296 pmap_activate(struct proc *p)
297 {
298 	pmap_t pmap = p->p_vmspace->vm_map.pmap;
299 
300 	if (pmap->pm_asid == -1)
301 		pmap->pm_asid = __pmap_asid_alloc();
302 
303 	KDASSERT(pmap->pm_asid >= 0 && pmap->pm_asid < 256);
304 	sh_tlb_set_asid(pmap->pm_asid);
305 }
306 
307 void
308 pmap_deactivate(struct proc *p)
309 {
310 
311 	/* Nothing to do */
312 }
313 
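/*
 * int pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
 *     int flags):
 *	Enter a va -> pa mapping with the given protection.  Managed
 *	(memory-space) pages are mapped cached, get reference/modified
 *	emulation, and are linked onto the page's pv list; unmanaged
 *	(bus-space) pages are mapped uncached and kernel-only.
 */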
314 int
315 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
316 {
317 	struct vm_page *pg;
318 	struct vm_page_md *pvh;
319 	pt_entry_t entry;
320 	boolean_t kva = (pmap == pmap_kernel());
321 
322 	/* "flags" never exceeds "prot" */
323 	KDASSERT(prot != 0 && ((flags & VM_PROT_ALL) & ~prot) == 0);
324 
325 	pg = PHYS_TO_VM_PAGE(pa);
326 	entry = (pa & PG_PPN) | PG_4K;
327 	if (flags & PMAP_WIRED)
328 		entry |= _PG_WIRED;
329 
330 	if (pg != NULL) {	/* memory-space */
331 		pvh = &pg->mdpage;
332 		entry |= PG_C;	/* always cached */
333 
334 		/* Modified/reference tracking */
335 		if (flags & VM_PROT_WRITE) {
336 			entry |= PG_V | PG_D;
337 			pvh->pvh_flags |= PVH_MODIFIED | PVH_REFERENCED;
338 		} else if (flags & VM_PROT_ALL) {
339 			entry |= PG_V;
340 			pvh->pvh_flags |= PVH_REFERENCED;
341 		}
342 
343 		/* Protection */
344 		if ((prot & VM_PROT_WRITE) && (pvh->pvh_flags & PVH_MODIFIED)) {
345 			if (kva)
346 				entry |= PG_PR_KRW | PG_SH;
347 			else
348 				entry |= PG_PR_URW;
349 		} else {
350 			/* RO, COW page */
351 			if (kva)
352 				entry |= PG_PR_KRO | PG_SH;
353 			else
354 				entry |= PG_PR_URO;
355 		}
356 
357 		/* Check for existing mapping */
358 		if (__pmap_map_change(pmap, va, pa, prot, entry))
359 			return (0);
360 
361 		/* Add to physical-virtual map list of this page */
362 		__pmap_pv_enter(pmap, pg, va);
363 
364 	} else {	/* bus-space (always uncached map) */
365 		KDASSERT(kva);
366 		entry |= PG_V | PG_SH |
367 		    ((prot & VM_PROT_WRITE) ? (PG_PR_KRW | PG_D) : PG_PR_KRO);
368 	}
369 
370 	/* Register to page table */
371 	*__pmap_pte_alloc(pmap, va) = entry;
372 
373 	if (pmap->pm_asid != -1)
374 		sh_tlb_update(pmap->pm_asid, va, entry);
375 
376 	if (!SH_HAS_UNIFIED_CACHE &&
377 	    (prot == (VM_PROT_READ | VM_PROT_EXECUTE)))
378 		sh_icache_sync_range_index(va, PAGE_SIZE);
379 
380 	if (entry & _PG_WIRED)
381 		pmap->pm_stats.wired_count++;
382 	pmap->pm_stats.resident_count++;
383 
384 	return (0);
385 }
386 
387 /*
388  * boolean_t __pmap_map_change(pmap_t pmap, vaddr_t va, paddr_t pa,
389  *     vm_prot_t prot, pt_entry_t entry):
390  *	Handle the situation that pmap_enter() is called to enter a
391  *	mapping at a virtual address for which a mapping already
392  *	exists.
393  */
394 boolean_t
395 __pmap_map_change(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
396     pt_entry_t entry)
397 {
398 	pt_entry_t *pte, oentry;
399 	vaddr_t eva = va + PAGE_SIZE;
400 
401 	if ((pte = __pmap_pte_lookup(pmap, va)) == NULL ||
402 	    ((oentry = *pte) == 0))
403 		return (FALSE);		/* no mapping exists. */
404 
405 	if (pa != (oentry & PG_PPN)) {
406 		/* Mapping now points to a different physical page; remove the old one. */
407 		pmap_remove(pmap, va, eva);
408 		return (FALSE);
409 	}
410 
411 	/* Pre-existing mapping */
412 
413 	/* Protection change. */
414 	if ((oentry & PG_PR_MASK) != (entry & PG_PR_MASK))
415 		pmap_protect(pmap, va, eva, prot);
416 
417 	/* Wired change */
418 	if (oentry & _PG_WIRED) {
419 		if (!(entry & _PG_WIRED)) {
420 			/* wired -> unwired */
421 			*pte = entry;
422 			/* "wired" is a software bit; no need to update the TLB */
423 			pmap->pm_stats.wired_count--;
424 		}
425 	} else if (entry & _PG_WIRED) {
426 		/* unwired -> wired; remove so the mapping is re-entered with "flags" */
427 		pmap_remove(pmap, va, eva);
428 		return (FALSE);
429 	}
430 
431 	return (TRUE);	/* mapping was changed. */
432 }
433 
434 /*
435  * void __pmap_pv_enter(pmap_t pmap, struct vm_page *pg, vaddr_t vaddr):
436  *	Insert a physical-virtual mapping into vm_page.
437  *	Assumes any pre-existing mapping has already been removed.
438  */
439 void
440 __pmap_pv_enter(pmap_t pmap, struct vm_page *pg, vaddr_t va)
441 {
442 	struct vm_page_md *pvh;
443 	struct pv_entry *pv;
444 	int s;
445 
446 	s = splvm();
447 	if (SH_HAS_VIRTUAL_ALIAS) {
448 		/* Remove all other mappings of this physical page */
449 		pvh = &pg->mdpage;
450 		while ((pv = SLIST_FIRST(&pvh->pvh_head)) != NULL) {
451 			pmap_remove(pv->pv_pmap, pv->pv_va,
452 			    pv->pv_va + PAGE_SIZE);
453 		}
454 	}
455 
456 	/* Register pv map */
457 	pvh = &pg->mdpage;
458 	pv = __pmap_pv_alloc();
459 	pv->pv_pmap = pmap;
460 	pv->pv_va = va;
461 
462 	SLIST_INSERT_HEAD(&pvh->pvh_head, pv, pv_link);
463 	splx(s);
464 }
465 
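/*
 * void pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva):
 *	Remove all mappings in [sva, eva), unlink managed pages from
 *	their pv lists, and invalidate the corresponding TLB entries when
 *	the pmap has a valid ASID.
 */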
466 void
467 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
468 {
469 	struct vm_page *pg;
470 	pt_entry_t *pte, entry;
471 	vaddr_t va;
472 
473 	KDASSERT((sva & PGOFSET) == 0);
474 
475 	for (va = sva; va < eva; va += PAGE_SIZE) {
476 		if ((pte = __pmap_pte_lookup(pmap, va)) == NULL ||
477 		    (entry = *pte) == 0)
478 			continue;
479 
480 		if ((pg = PHYS_TO_VM_PAGE(entry & PG_PPN)) != NULL)
481 			__pmap_pv_remove(pmap, pg, va);
482 
483 		if (entry & _PG_WIRED)
484 			pmap->pm_stats.wired_count--;
485 		pmap->pm_stats.resident_count--;
486 		*pte = 0;
487 
488 		/*
489 		 * When pmap->pm_asid == -1 (invalid ASID), the old TLB entries
490 		 * for this pmap have already been removed by pmap_activate().
491 		 */
492 		if (pmap->pm_asid != -1)
493 			sh_tlb_invalidate_addr(pmap->pm_asid, va);
494 	}
495 }
496 
497 /*
498  * void __pmap_pv_remove(pmap_t pmap, struct vm_page *pg, vaddr_t vaddr):
499  *	Remove a physical-virtual mapping from vm_page.
500  */
501 void
502 __pmap_pv_remove(pmap_t pmap, struct vm_page *pg, vaddr_t vaddr)
503 {
504 	struct vm_page_md *pvh;
505 	struct pv_entry *pv;
506 	int s;
507 
508 	s = splvm();
509 	pvh = &pg->mdpage;
510 	SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
511 		if (pv->pv_pmap == pmap && pv->pv_va == vaddr) {
512 			if (SH_HAS_VIRTUAL_ALIAS ||
513 			    (SH_HAS_WRITEBACK_CACHE &&
514 				(pg->mdpage.pvh_flags & PVH_MODIFIED))) {
515 				/*
516 				 * Always use index ops. so we don't have to
517 				 * worry about which address space is current.
518 				 */
519 				sh_dcache_wbinv_range_index
520 				    (pv->pv_va, PAGE_SIZE);
521 			}
522 
523 			SLIST_REMOVE(&pvh->pvh_head, pv, pv_entry, pv_link);
524 			__pmap_pv_free(pv);
525 			break;
526 		}
527 	}
528 #ifdef DEBUG
529 	/* Check for duplicate mappings. */
530 	SLIST_FOREACH(pv, &pvh->pvh_head, pv_link)
531 	    KDASSERT(!(pv->pv_pmap == pmap && pv->pv_va == vaddr));
532 #endif
533 	splx(s);
534 }
535 
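/*
 * void pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot):
 *	Enter an unmanaged kernel mapping from va to pa; no pv tracking
 *	is done.  Managed pages are mapped cached, others uncached.
 */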
536 void
537 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
538 {
539 	pt_entry_t *pte, entry;
540 
541 	KDASSERT((va & PGOFSET) == 0);
542 	KDASSERT(va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS);
543 
544 	entry = (pa & PG_PPN) | PG_V | PG_SH | PG_4K;
545 	if (prot & VM_PROT_WRITE)
546 		entry |= (PG_PR_KRW | PG_D);
547 	else
548 		entry |= PG_PR_KRO;
549 
550 	if (PHYS_TO_VM_PAGE(pa))
551 		entry |= PG_C;
552 
553 	pte = __pmap_kpte_lookup(va);
554 
555 	KDASSERT(*pte == 0);
556 	*pte = entry;
557 
558 	sh_tlb_update(0, va, entry);
559 }
560 
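/*
 * void pmap_kremove(vaddr_t va, vsize_t len):
 *	Remove mappings entered with pmap_kenter_pa() and invalidate
 *	their TLB entries (kernel ASID 0).
 */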
561 void
562 pmap_kremove(vaddr_t va, vsize_t len)
563 {
564 	pt_entry_t *pte;
565 	vaddr_t eva = va + len;
566 
567 	KDASSERT((va & PGOFSET) == 0);
568 	KDASSERT((len & PGOFSET) == 0);
569 	KDASSERT(va >= VM_MIN_KERNEL_ADDRESS && eva <= VM_MAX_KERNEL_ADDRESS);
570 
571 	for (; va < eva; va += PAGE_SIZE) {
572 		pte = __pmap_kpte_lookup(va);
573 		KDASSERT(pte != NULL);
574 		if (*pte == 0)
575 			continue;
576 
577 		if (SH_HAS_VIRTUAL_ALIAS && PHYS_TO_VM_PAGE(*pte & PG_PPN))
578 			sh_dcache_wbinv_range(va, PAGE_SIZE);
579 		*pte = 0;
580 
581 		sh_tlb_invalidate_addr(0, va);
582 	}
583 }
584 
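/*
 * boolean_t pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap):
 *	Look up the physical address mapped at va; return TRUE and store
 *	it in *pap if a mapping exists, FALSE otherwise.
 */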
585 boolean_t
586 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
587 {
588 	pt_entry_t *pte = __pmap_pte_lookup(pmap, va);
589 
590 	if (pte == NULL || *pte == 0)
591 		return (FALSE);
592 
593 	if (pap != NULL)
594 		*pap = (*pte & PG_PPN) | (va & PGOFSET);
595 
596 	return (TRUE);
597 }
598 
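/*
 * void pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot):
 *	Lower the protection of all mappings in [sva, eva) to prot;
 *	removing read permission removes the mappings entirely.
 */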
599 void
600 pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
601 {
602 	boolean_t kernel = pmap == pmap_kernel();
603 	pt_entry_t *pte, entry;
604 	vaddr_t va;
605 
606 	sva = trunc_page(sva);
607 
608 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
609 		pmap_remove(pmap, sva, eva);
610 		return;
611 	}
612 
613 	for (va = sva; va < eva; va += PAGE_SIZE) {
614 
615 		if (((pte = __pmap_pte_lookup(pmap, va)) == NULL) ||
616 		    (entry = *pte) == 0)
617 			continue;
618 
619 		if (SH_HAS_VIRTUAL_ALIAS && (entry & PG_D)) {
620 			if (!SH_HAS_UNIFIED_CACHE && (prot & VM_PROT_EXECUTE))
621 				sh_icache_sync_range_index(va, PAGE_SIZE);
622 			else
623 				sh_dcache_wbinv_range_index(va, PAGE_SIZE);
624 		}
625 
626 		entry &= ~PG_PR_MASK;
627 		switch (prot) {
628 		default:
629 			panic("pmap_protect: invalid protection mode %x", prot);
630 		case VM_PROT_READ:
631 			/* FALLTHROUGH */
632 		case VM_PROT_READ | VM_PROT_EXECUTE:
633 			entry |= kernel ? PG_PR_KRO : PG_PR_URO;
634 			break;
635 		case VM_PROT_READ | VM_PROT_WRITE:
636 			/* FALLTHROUGH */
637 		case VM_PROT_ALL:
638 			entry |= kernel ? PG_PR_KRW : PG_PR_URW;
639 			break;
640 		}
641 		*pte = entry;
642 
643 		if (pmap->pm_asid != -1)
644 			sh_tlb_update(pmap->pm_asid, va, entry);
645 	}
646 }
647 
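/*
 * void pmap_page_protect(struct vm_page *pg, vm_prot_t prot):
 *	Lower the protection of every mapping of pg: write access is left
 *	untouched, read-only downgrades each mapping via pmap_protect(),
 *	and anything else removes all mappings of the page.
 */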
648 void
649 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
650 {
651 	struct vm_page_md *pvh = &pg->mdpage;
652 	struct pv_entry *pv;
653 	struct pmap *pmap;
654 	vaddr_t va;
655 	int s;
656 
657 	switch (prot) {
658 	case VM_PROT_READ | VM_PROT_WRITE:
659 		/* FALLTHROUGH */
660 	case VM_PROT_ALL:
661 		break;
662 
663 	case VM_PROT_READ:
664 		/* FALLTHROUGH */
665 	case VM_PROT_READ | VM_PROT_EXECUTE:
666 		s = splvm();
667 		SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
668 			pmap = pv->pv_pmap;
669 			va = pv->pv_va;
670 
671 			KDASSERT(pmap);
672 			pmap_protect(pmap, va, va + PAGE_SIZE, prot);
673 		}
674 		splx(s);
675 		break;
676 
677 	default:
678 		/* Remove all */
679 		s = splvm();
680 		while ((pv = SLIST_FIRST(&pvh->pvh_head)) != NULL) {
681 			va = pv->pv_va;
682 			pmap_remove(pv->pv_pmap, va, va + PAGE_SIZE);
683 		}
684 		splx(s);
685 	}
686 }
687 
688 void
689 pmap_unwire(pmap_t pmap, vaddr_t va)
690 {
691 	pt_entry_t *pte, entry;
692 
693 	if ((pte = __pmap_pte_lookup(pmap, va)) == NULL ||
694 	    (entry = *pte) == 0 ||
695 	    (entry & _PG_WIRED) == 0)
696 		return;
697 
698 	*pte = entry & ~_PG_WIRED;
699 	pmap->pm_stats.wired_count--;
700 }
701 
702 void
703 pmap_procwr(struct proc	*p, vaddr_t va, size_t len)
704 {
705 
706 	if (!SH_HAS_UNIFIED_CACHE)
707 		sh_icache_sync_range_index(va, len);
708 }
709 
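/*
 * void pmap_zero_page(paddr_t phys):
 *	Zero a physical page.  With virtually aliased caches the page is
 *	accessed through the uncached P2 segment after flushing the data
 *	cache; otherwise it is accessed through P1.
 */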
710 void
711 pmap_zero_page(paddr_t phys)
712 {
713 
714 	if (SH_HAS_VIRTUAL_ALIAS) {	/* don't pollute the cache */
715 		/* sync cache since we access via P2. */
716 		sh_dcache_wbinv_all();
717 		memset((void *)SH3_PHYS_TO_P2SEG(phys), 0, PAGE_SIZE);
718 	} else {
719 		memset((void *)SH3_PHYS_TO_P1SEG(phys), 0, PAGE_SIZE);
720 	}
721 }
722 
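/*
 * void pmap_copy_page(paddr_t src, paddr_t dst):
 *	Copy a physical page.  As in pmap_zero_page(), P2 is used when
 *	the cache is virtually aliased, P1 otherwise.
 */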
723 void
724 pmap_copy_page(paddr_t src, paddr_t dst)
725 {
726 
727 	if (SH_HAS_VIRTUAL_ALIAS) {	/* don't pollute the cache */
728 		/* sync cache since we access via P2. */
729 		sh_dcache_wbinv_all();
730 		memcpy((void *)SH3_PHYS_TO_P2SEG(dst),
731 		    (void *)SH3_PHYS_TO_P2SEG(src), PAGE_SIZE);
732 	} else {
733 		memcpy((void *)SH3_PHYS_TO_P1SEG(dst),
734 		    (void *)SH3_PHYS_TO_P1SEG(src), PAGE_SIZE);
735 	}
736 }
737 
738 boolean_t
739 pmap_is_referenced(struct vm_page *pg)
740 {
741 
742 	return ((pg->mdpage.pvh_flags & PVH_REFERENCED) ? TRUE : FALSE);
743 }
744 
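/*
 * boolean_t pmap_clear_reference(struct vm_page *pg):
 *	Clear the referenced attribute of pg and restart reference-bit
 *	emulation by clearing PG_V in each of its mappings; returns
 *	whether the page was previously referenced.
 */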
745 boolean_t
746 pmap_clear_reference(struct vm_page *pg)
747 {
748 	struct vm_page_md *pvh = &pg->mdpage;
749 	struct pv_entry *pv;
750 	pt_entry_t *pte;
751 	pmap_t pmap;
752 	vaddr_t va;
753 	int s;
754 
755 	if ((pg->mdpage.pvh_flags & PVH_REFERENCED) == 0)
756 		return (FALSE);
757 
758 	pg->mdpage.pvh_flags &= ~PVH_REFERENCED;
759 
760 	s = splvm();
761 	/* Restart reference bit emulation */
762 	SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
763 		pmap = pv->pv_pmap;
764 		va = pv->pv_va;
765 
766 		if ((pte = __pmap_pte_lookup(pmap, va)) == NULL)
767 			continue;
768 		if ((*pte & PG_V) == 0)
769 			continue;
770 		*pte &= ~PG_V;
771 
772 		if (pmap->pm_asid != -1)
773 			sh_tlb_invalidate_addr(pmap->pm_asid, va);
774 	}
775 	splx(s);
776 
777 	return (TRUE);
778 }
779 
780 boolean_t
781 pmap_is_modified(struct vm_page *pg)
782 {
783 
784 	return ((pg->mdpage.pvh_flags & PVH_MODIFIED) ? TRUE : FALSE);
785 }
786 
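/*
 * boolean_t pmap_clear_modify(struct vm_page *pg):
 *	Clear the modified attribute of pg, write back dirty cache lines,
 *	and clear PG_D in each of its mappings to restart modified-bit
 *	emulation; returns whether the page was previously modified.
 */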
787 boolean_t
788 pmap_clear_modify(struct vm_page *pg)
789 {
790 	struct vm_page_md *pvh = &pg->mdpage;
791 	struct pv_entry *pv;
792 	struct pmap *pmap;
793 	pt_entry_t *pte, entry;
794 	boolean_t modified;
795 	vaddr_t va;
796 	int s;
797 
798 	modified = pvh->pvh_flags & PVH_MODIFIED;
799 	if (!modified)
800 		return (FALSE);
801 
802 	pvh->pvh_flags &= ~PVH_MODIFIED;
803 
804 	s = splvm();
805 	if (SLIST_EMPTY(&pvh->pvh_head)) {/* no map on this page */
806 		splx(s);
807 		return (TRUE);
808 	}
809 
810 	/* Write back and invalidate the data cache */
811 	if (!SH_HAS_VIRTUAL_ALIAS && SH_HAS_WRITEBACK_CACHE)
812 		sh_dcache_wbinv_all();
813 
814 	SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
815 		pmap = pv->pv_pmap;
816 		va = pv->pv_va;
817 		if ((pte = __pmap_pte_lookup(pmap, va)) == NULL)
818 			continue;
819 		entry = *pte;
820 		if ((entry & PG_D) == 0)
821 			continue;
822 
823 		if (SH_HAS_VIRTUAL_ALIAS)
824 			sh_dcache_wbinv_range_index(va, PAGE_SIZE);
825 
826 		*pte = entry & ~PG_D;
827 		if (pmap->pm_asid != -1)
828 			sh_tlb_invalidate_addr(pmap->pm_asid, va);
829 	}
830 	splx(s);
831 
832 	return (TRUE);
833 }
834 
835 paddr_t
836 pmap_phys_address(int cookie)
837 {
838 
839 	return (sh3_ptob(cookie));
840 }
841 
842 /*
843  * pv_entry pool allocator:
844  *	void *__pmap_pv_page_alloc(struct pool *pool, int flags):
845  *	void __pmap_pv_page_free(struct pool *pool, void *v):
846  */
847 void *
848 __pmap_pv_page_alloc(struct pool *pool, int flags)
849 {
850 	struct vm_page *pg;
851 
852 	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
853 	if (pg == NULL)
854 		return (NULL);
855 
856 	return ((void *)SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg)));
857 }
858 
859 void
860 __pmap_pv_page_free(struct pool *pool, void *v)
861 {
862 	vaddr_t va = (vaddr_t)v;
863 
864 	/* Invalidate cache for next use of this page */
865 	if (SH_HAS_VIRTUAL_ALIAS)
866 		sh_icache_sync_range_index(va, PAGE_SIZE);
867 	uvm_pagefree(PHYS_TO_VM_PAGE(SH3_P1SEG_TO_PHYS(va)));
868 }
869 
870 /*
871  * pt_entry_t *__pmap_pte_alloc(pmap_t pmap, vaddr_t va):
872  *	Look up a page table entry; if its page table page is not yet
873  *	allocated, allocate one.  The page table is accessed via P1.
874  */
875 pt_entry_t *
876 __pmap_pte_alloc(pmap_t pmap, vaddr_t va)
877 {
878 	struct vm_page *pg;
879 	pt_entry_t *ptp, *pte;
880 
881 	if ((pte = __pmap_pte_lookup(pmap, va)) != NULL)
882 		return (pte);
883 
884 	/* Allocate page table (not managed page) */
885 	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE | UVM_PGA_ZERO);
886 
887 	ptp = (pt_entry_t *)SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg));
888 	pmap->pm_ptp[__PMAP_PTP_INDEX(va)] = ptp;
889 
890 	return (ptp + __PMAP_PTP_OFSET(va));
891 }
892 
893 /*
894  * pt_entry_t *__pmap_pte_lookup(pmap_t pmap, vaddr_t va):
895  *	Look up a page table entry; returns NULL if none is allocated.
896  */
897 pt_entry_t *
898 __pmap_pte_lookup(pmap_t pmap, vaddr_t va)
899 {
900 	pt_entry_t *ptp;
901 
902 	if (pmap == pmap_kernel())
903 		return (__pmap_kpte_lookup(va));
904 
905 	/* Lookup page table page */
906 	ptp = pmap->pm_ptp[__PMAP_PTP_INDEX(va)];
907 	if (ptp == NULL)
908 		return (NULL);
909 
910 	return (ptp + __PMAP_PTP_OFSET(va));
911 }
912 
913 /*
914  * pt_entry_t *__pmap_kpte_lookup(vaddr_t va):
915  *	kernel virtual only version of __pmap_pte_lookup().
916  */
917 pt_entry_t *
918 __pmap_kpte_lookup(vaddr_t va)
919 {
920 
921 	return (__pmap_kernel.pm_ptp
922 	    [__PMAP_PTP_INDEX(va - VM_MIN_KERNEL_ADDRESS)] +
923 	    __PMAP_PTP_OFSET(va));
924 }
925 
926 /*
927  * boolean_t __pmap_pte_load(pmap_t pmap, vaddr_t va, int flags):
928  *	Look up a page table entry and, if found, load it into the TLB.
929  *	"flags" specifies whether to emulate the reference and/or modified bits.
930  */
931 boolean_t
932 __pmap_pte_load(pmap_t pmap, vaddr_t va, int flags)
933 {
934 	struct vm_page *pg;
935 	pt_entry_t *pte;
936 	pt_entry_t entry;
937 
938 	KDASSERT((((int)va < 0) && (pmap == pmap_kernel())) ||
939 	    (((int)va >= 0) && (pmap != pmap_kernel())));
940 
941 	/* Lookup page table entry */
942 	if (((pte = __pmap_pte_lookup(pmap, va)) == NULL) ||
943 	    ((entry = *pte) == 0))
944 		return (FALSE);
945 
946 	KDASSERT(va != 0);
947 
948 	/* Emulate reference/modified tracking for managed page. */
949 	if (flags != 0 && (pg = PHYS_TO_VM_PAGE(entry & PG_PPN)) != NULL) {
950 		if (flags & PVH_REFERENCED) {
951 			pg->mdpage.pvh_flags |= PVH_REFERENCED;
952 			entry |= PG_V;
953 		}
954 		if (flags & PVH_MODIFIED) {
955 			pg->mdpage.pvh_flags |= PVH_MODIFIED;
956 			entry |= PG_D;
957 		}
958 		*pte = entry;
959 	}
960 
961 	/* When pmap has valid ASID, register to TLB */
962 	if (pmap->pm_asid != -1)
963 		sh_tlb_update(pmap->pm_asid, va, entry);
964 
965 	return (TRUE);
966 }
967 
968 /*
969  * int __pmap_asid_alloc(void):
970  *	Allocate a new ASID.  If all ASIDs are in use, steal one from another process.
971  */
972 int
973 __pmap_asid_alloc()
974 {
975 	struct proc *p;
976 	int i, j, k, n, map, asid;
977 
978 	/* Search free ASID */
979 	i = __pmap_asid.hint >> 5;
980 	n = i + 8;
981 	for (; i < n; i++) {
982 		k = i & 0x7;
983 		map = __pmap_asid.map[k];
984 		for (j = 0; j < 32; j++) {
985 			if ((map & (1 << j)) == 0 && (k + j) != 0) {
986 				__pmap_asid.map[k] |= (1 << j);
987 				__pmap_asid.hint = (k << 5) + j;
988 				return (__pmap_asid.hint);
989 			}
990 		}
991 	}
992 
993 	/* Steal ASID */
994 	LIST_FOREACH(p, &allproc, p_list) {
995 		if ((asid = p->p_vmspace->vm_map.pmap->pm_asid) > 0) {
996 			pmap_t pmap = p->p_vmspace->vm_map.pmap;
997 			pmap->pm_asid = -1;
998 			__pmap_asid.hint = asid;
999 			/* Invalidate all TLB entries for the stolen ASID */
1000 			sh_tlb_invalidate_asid(asid);
1001 
1002 			return (__pmap_asid.hint);
1003 		}
1004 	}
1005 
1006 	panic("No ASID allocated.");
1007 }
1008 
1009 /*
1010  * void __pmap_asid_free(int asid):
1011  *	Return unused ASID to pool. and remove all TLB entry of ASID.
1012  *	Return an unused ASID to the pool and remove all TLB entries for it.
1013 void
1014 __pmap_asid_free(int asid)
1015 {
1016 	int i;
1017 
1018 	if (asid < 1)	/* Don't invalidate kernel ASID 0 */
1019 		return;
1020 
1021 	sh_tlb_invalidate_asid(asid);
1022 
1023 	i = asid >> 5;
1024 	__pmap_asid.map[i] &= ~(1 << (asid - (i << 5)));
1025 }
1026