1 /*	$OpenBSD: pmap.c,v 1.30 2023/01/01 19:49:17 miod Exp $	*/
2 /*	$NetBSD: pmap.c,v 1.55 2006/08/07 23:19:36 tsutsui Exp $	*/
3 
4 /*-
5  * Copyright (c) 2002 The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to The NetBSD Foundation
9  * by UCHIYAMA Yasushi.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/proc.h>
36 #include <sys/pool.h>
37 #include <sys/msgbuf.h>
38 
39 #include <uvm/uvm.h>
40 
41 #include <sh/mmu.h>
42 #include <sh/cache.h>
43 
44 #ifdef DEBUG
45 #define	STATIC
46 #else
47 #define	STATIC	static
48 #endif
49 
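/*
 * Two-level page table layout: pm_ptp[] holds one page table page
 * pointer per 4MB (1 << __PMAP_PTP_SHIFT) chunk of virtual space, and
 * each page table page holds __PMAP_PTP_PG_N PTEs, one per page.
 */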
50 #define	__PMAP_PTP_SHIFT	22
51 #define	__PMAP_PTP_TRUNC(va)						\
52 	(((va) + (1 << __PMAP_PTP_SHIFT) - 1) & ~((1 << __PMAP_PTP_SHIFT) - 1))
53 #define	__PMAP_PTP_PG_N		(PAGE_SIZE / sizeof(pt_entry_t))
54 #define	__PMAP_PTP_INDEX(va)	(((va) >> __PMAP_PTP_SHIFT) & (__PMAP_PTP_N - 1))
55 #define	__PMAP_PTP_OFSET(va)	(((va) >> PGSHIFT) & (__PMAP_PTP_PG_N - 1))
56 
57 struct pmap __pmap_kernel;
58 STATIC vaddr_t __pmap_kve;	/* VA of last kernel virtual address */
59 
60 /* For the fast tlb miss handler */
61 pt_entry_t **curptd;		/* p1 va of curlwp->...->pm_ptp */
62 
63 /* pmap pool */
64 STATIC struct pool __pmap_pmap_pool;
65 
66 /* pv_entry ops. */
67 struct pv_entry {
68 	struct pmap *pv_pmap;
69 	vaddr_t pv_va;
70 	vm_prot_t pv_prot;
71 	SLIST_ENTRY(pv_entry) pv_link;
72 };
73 #define	__pmap_pv_alloc()	pool_get(&__pmap_pv_pool, PR_NOWAIT)
74 #define	__pmap_pv_free(pv)	pool_put(&__pmap_pv_pool, (pv))
75 STATIC int __pmap_pv_enter(pmap_t, struct vm_page *, vaddr_t, vm_prot_t);
76 STATIC void __pmap_pv_remove(pmap_t, struct vm_page *, vaddr_t);
77 STATIC void *__pmap_pv_page_alloc(struct pool *, int, int *);
78 STATIC void __pmap_pv_page_free(struct pool *, void *);
79 STATIC struct pool __pmap_pv_pool;
80 STATIC struct pool_allocator pmap_pv_page_allocator = {
81 	__pmap_pv_page_alloc, __pmap_pv_page_free, 0,
82 };
83 
84 /* ASID ops. */
85 STATIC int __pmap_asid_alloc(void);
86 STATIC void __pmap_asid_free(int);
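/* 8 x 32-bit words give one bit per ASID (0-255); ASID 0 is reserved for the kernel. */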
87 STATIC struct {
88 	uint32_t map[8];
89 	int hint;	/* hint for next allocation */
90 } __pmap_asid;
91 
92 /* page table entry ops. */
93 STATIC pt_entry_t *__pmap_pte_alloc(pmap_t, vaddr_t);
94 
95 /* pmap_enter util */
96 STATIC boolean_t __pmap_map_change(pmap_t, vaddr_t, paddr_t, vm_prot_t,
97     pt_entry_t);
98 
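/*
 * pmap_bootstrap: set up the kernel pmap and enable the MMU.  Runs
 * before uvm_page_init(), so memory comes from uvm_pageboot_alloc().
 */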
99 void
100 pmap_bootstrap(void)
101 {
102 	/* Steal msgbuf area */
103 	initmsgbuf((caddr_t)uvm_pageboot_alloc(MSGBUFSIZE), MSGBUFSIZE);
104 
105 	__pmap_kve = VM_MIN_KERNEL_ADDRESS;
106 
107 	pmap_kernel()->pm_refcnt = 1;
108 	pmap_kernel()->pm_ptp = (pt_entry_t **)uvm_pageboot_alloc(PAGE_SIZE);
109 	memset(pmap_kernel()->pm_ptp, 0, PAGE_SIZE);
110 
111 	/* Enable MMU */
112 	sh_mmu_start();
113 	/* Mask all interrupts */
114 	_cpu_intr_suspend();
115 	/* Enable exception for P3 access */
116 	_cpu_exception_resume(0);
117 }
118 
119 vaddr_t
120 pmap_steal_memory(vsize_t size, vaddr_t *vstart, vaddr_t *vend)
121 {
122 	struct vm_physseg *bank;
123 	int i, j, npage;
124 	paddr_t pa;
125 	vaddr_t va;
126 
127 	KDASSERT(!uvm.page_init_done);
128 
129 	size = round_page(size);
130 	npage = atop(size);
131 
132 	for (i = 0, bank = &vm_physmem[i]; i < vm_nphysseg; i++, bank++)
133 		if (npage <= bank->avail_end - bank->avail_start)
134 			break;
135 	KDASSERT(i != vm_nphysseg);
136 
137 	/* Steal pages */
138 	bank->avail_end -= npage;
139 	bank->end -= npage;
140 	pa = ptoa(bank->avail_end);
141 
142 	/* GC memory bank */
143 	if (bank->avail_start == bank->end) {
144 		/* Remove this segment from the list. */
145 		vm_nphysseg--;
146 		KDASSERT(vm_nphysseg > 0);
147 		for (j = i; j < vm_nphysseg; j++)
148 			vm_physmem[j] = vm_physmem[j + 1];
149 	}
150 
151 	va = SH3_PHYS_TO_P1SEG(pa);
152 	memset((caddr_t)va, 0, size);
153 
154 	if (vstart)
155 		*vstart = VM_MIN_KERNEL_ADDRESS;
156 	if (vend)
157 		*vend = VM_MAX_KERNEL_ADDRESS;
158 
159 	return (va);
160 }
161 
162 vaddr_t
163 pmap_growkernel(vaddr_t maxkvaddr)
164 {
165 	int i, n;
166 
167 	if (maxkvaddr <= __pmap_kve)
168 		return (__pmap_kve);
169 
170 	i = __PMAP_PTP_INDEX(__pmap_kve - VM_MIN_KERNEL_ADDRESS);
171 	__pmap_kve = __PMAP_PTP_TRUNC(maxkvaddr);
172 	n = __PMAP_PTP_INDEX(__pmap_kve - VM_MIN_KERNEL_ADDRESS);
173 
174 	/* Allocate page table pages */
175 	for (; i < n; i++) {
176 		if (__pmap_kernel.pm_ptp[i] != NULL)
177 			continue;
178 
179 		if (uvm.page_init_done) {
180 			struct vm_page *pg = uvm_pagealloc(NULL, 0, NULL,
181 			    UVM_PGA_USERESERVE | UVM_PGA_ZERO);
182 			if (pg == NULL)
183 				goto error;
184 			__pmap_kernel.pm_ptp[i] = (pt_entry_t *)
185 			    SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg));
186 		} else {
187 			pt_entry_t *ptp = (pt_entry_t *)
188 			    uvm_pageboot_alloc(PAGE_SIZE);
189 			if (ptp == NULL)
190 				goto error;
191 			__pmap_kernel.pm_ptp[i] = ptp;
192 			memset(ptp, 0, PAGE_SIZE);
193 		}
194 	}
195 
196 	return (__pmap_kve);
197  error:
198 	panic("pmap_growkernel: out of memory.");
199 	/* NOTREACHED */
200 }
201 
202 void
203 pmap_init(void)
204 {
205 	/* Initialize pmap module */
206 	pool_init(&__pmap_pmap_pool, sizeof(struct pmap), 0, IPL_NONE, 0,
207 	    "pmappl", &pool_allocator_single);
208 	pool_init(&__pmap_pv_pool, sizeof(struct pv_entry), 0, IPL_VM, 0,
209 	    "pvpl", &pmap_pv_page_allocator);
210 	pool_setlowat(&__pmap_pv_pool, 16);
211 }
212 
213 pmap_t
214 pmap_create(void)
215 {
216 	pmap_t pmap;
217 	struct vm_page *pg;
218 
219 	pmap = pool_get(&__pmap_pmap_pool, PR_WAITOK|PR_ZERO);
220 	pmap->pm_asid = -1;
221 	pmap->pm_refcnt = 1;
222 	/* Allocate page table page holder (512 slots) */
223 	while ((pg = uvm_pagealloc(NULL, 0, NULL,
224 	    UVM_PGA_USERESERVE | UVM_PGA_ZERO)) == NULL)
225 		uvm_wait("pmap_create");
226 
227 	pmap->pm_ptp = (pt_entry_t **)SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg));
228 
229 	return (pmap);
230 }
231 
232 void
233 pmap_destroy(pmap_t pmap)
234 {
235 	int i;
236 
237 	if (--pmap->pm_refcnt > 0)
238 		return;
239 
240 	/* Deallocate all page table pages */
241 	for (i = 0; i < __PMAP_PTP_N; i++) {
242 		vaddr_t va = (vaddr_t)pmap->pm_ptp[i];
243 		if (va == 0)
244 			continue;
245 #ifdef DEBUG	/* Check no mapping exists. */
246 		{
247 			int j;
248 			pt_entry_t *pte = (pt_entry_t *)va;
249 			for (j = 0; j < __PMAP_PTP_PG_N; j++, pte++)
250 				KDASSERT(*pte == 0);
251 		}
252 #endif /* DEBUG */
253 		/* Purge cache entry for next use of this page. */
254 		if (SH_HAS_VIRTUAL_ALIAS)
255 			sh_dcache_inv_range(va, PAGE_SIZE);
256 		/* Free page table */
257 		uvm_pagefree(PHYS_TO_VM_PAGE(SH3_P1SEG_TO_PHYS(va)));
258 	}
259 	/* Deallocate page table page holder */
260 	if (SH_HAS_VIRTUAL_ALIAS)
261 		sh_dcache_inv_range((vaddr_t)pmap->pm_ptp, PAGE_SIZE);
262 	uvm_pagefree(PHYS_TO_VM_PAGE(SH3_P1SEG_TO_PHYS((vaddr_t)pmap->pm_ptp)));
263 
264 	/* Free ASID */
265 	__pmap_asid_free(pmap->pm_asid);
266 
267 	pool_put(&__pmap_pmap_pool, pmap);
268 }
269 
270 void
271 pmap_reference(pmap_t pmap)
272 {
273 	pmap->pm_refcnt++;
274 }
275 
276 void
277 pmap_activate(struct proc *p)
278 {
279 	pmap_t pmap = p->p_vmspace->vm_map.pmap;
280 
281 	if (pmap->pm_asid == -1)
282 		pmap->pm_asid = __pmap_asid_alloc();
283 
284 	KDASSERT(pmap->pm_asid >= 0 && pmap->pm_asid < 256);
285 
286 	sh_tlb_set_asid(pmap->pm_asid);
287 	curptd = pmap->pm_ptp;
288 }
289 
290 int
291 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
292 {
293 	struct vm_page *pg;
294 	pt_entry_t entry, *pte;
295 	boolean_t kva = (pmap == pmap_kernel());
296 
297 	/* "flags" never exceed "prot" */
298 	KDASSERT(prot != 0 && ((flags & PROT_MASK) & ~prot) == 0);
299 
300 	pg = PHYS_TO_VM_PAGE(pa);
301 	entry = (pa & PG_PPN) | PG_4K;
302 	if (flags & PMAP_WIRED)
303 		entry |= _PG_WIRED;
304 
305 	if (pg != NULL) {	/* memory-space */
306 		entry |= PG_C;	/* always cached */
307 
308 		/* Modified/reference tracking */
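		/*
		 * PG_V and PG_D are only set for accesses that have actually
		 * happened, so later faults let __pmap_pte_load() emulate the
		 * referenced/modified bits.
		 */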
309 		if (flags & PROT_WRITE) {
310 			entry |= PG_V | PG_D;
311 			atomic_setbits_int(&pg->pg_flags,
312 			    PG_PMAP_MOD | PG_PMAP_REF);
313 		} else if (flags & PROT_MASK) {
314 			entry |= PG_V;
315 			atomic_setbits_int(&pg->pg_flags, PG_PMAP_REF);
316 		}
317 
318 		/* Protection */
319 		if ((prot & PROT_WRITE) && (pg->pg_flags & PG_PMAP_MOD)) {
320 			if (kva)
321 				entry |= PG_PR_KRW | PG_SH;
322 			else
323 				entry |= PG_PR_URW;
324 		} else {
325 			/* RO, COW page */
326 			if (kva)
327 				entry |= PG_PR_KRO | PG_SH;
328 			else
329 				entry |= PG_PR_URO;
330 		}
331 
332 		/* Check for existing mapping */
333 		if (__pmap_map_change(pmap, va, pa, prot, entry))
334 			return (0);
335 
336 		/* Add to physical-virtual map list of this page */
337 		if (__pmap_pv_enter(pmap, pg, va, prot) != 0) {
338 			if (flags & PMAP_CANFAIL)
339 				return (ENOMEM);
340 			panic("pmap_enter: cannot allocate pv entry");
341 		}
342 	} else {	/* bus-space (always uncached map) */
343 		if (kva) {
344 			entry |= PG_V | PG_SH |
345 			    ((prot & PROT_WRITE) ?
346 			    (PG_PR_KRW | PG_D) : PG_PR_KRO);
347 		} else {
348 			entry |= PG_V |
349 			    ((prot & PROT_WRITE) ?
350 			    (PG_PR_URW | PG_D) : PG_PR_URO);
351 		}
352 	}
353 
354 	/* Register to page table */
355 	if (kva)
356 		pte = __pmap_kpte_lookup(va);
357 	else {
358 		pte = __pmap_pte_alloc(pmap, va);
359 		if (pte == NULL) {
360 			if (flags & PMAP_CANFAIL)
361 				return ENOMEM;
362 			panic("pmap_enter: cannot allocate pte");
363 		}
364 	}
365 
366 	*pte = entry;
367 
368 	if (pmap->pm_asid != -1)
369 		sh_tlb_update(pmap->pm_asid, va, entry);
370 
371 	if (!SH_HAS_UNIFIED_CACHE &&
372 	    (prot == (PROT_READ | PROT_EXEC)))
373 		sh_icache_sync_range_index(va, PAGE_SIZE);
374 
375 	if (entry & _PG_WIRED)
376 		pmap->pm_stats.wired_count++;
377 	pmap->pm_stats.resident_count++;
378 
379 	return (0);
380 }
381 
382 /*
383  * boolean_t __pmap_map_change(pmap_t pmap, vaddr_t va, paddr_t pa,
384  *     vm_prot_t prot, pt_entry_t entry):
385  *	Handle the situation where pmap_enter() is called to enter a
386  *	mapping at a virtual address for which a mapping already
387  *	exists.
388  */
389 boolean_t
390 __pmap_map_change(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
391     pt_entry_t entry)
392 {
393 	pt_entry_t *pte, oentry;
394 	vaddr_t eva = va + PAGE_SIZE;
395 
396 	if ((pte = __pmap_pte_lookup(pmap, va)) == NULL ||
397 	    ((oentry = *pte) == 0))
398 		return (FALSE);		/* no mapping exists. */
399 
400 	if (pa != (oentry & PG_PPN)) {
401 		/* Mapping exists, but to a different physical page: remove it. */
402 		pmap_remove(pmap, va, eva);
403 		return (FALSE);
404 	}
405 
406 	/* Pre-existing mapping */
407 
408 	/* Protection change. */
409 	if ((oentry & PG_PR_MASK) != (entry & PG_PR_MASK))
410 		pmap_protect(pmap, va, eva, prot);
411 
412 	/* Wired change */
413 	if (oentry & _PG_WIRED) {
414 		if (!(entry & _PG_WIRED)) {
415 			/* wired -> unwired */
416 			*pte = entry;
417 			/* "wired" is software bits. no need to update TLB */
418 			pmap->pm_stats.wired_count--;
419 		}
420 	} else if (entry & _PG_WIRED) {
421 		/* unwired -> wired. make sure to reflect "flags" */
422 		pmap_remove(pmap, va, eva);
423 		return (FALSE);
424 	}
425 
426 	return (TRUE);	/* mapping was changed. */
427 }
428 
429 /*
430  * int __pmap_pv_enter(pmap_t pmap, struct vm_page *pg, vaddr_t va, vm_prot_t prot):
431  *	Insert a physical-virtual mapping into the vm_page.
432  *	Assumes any pre-existing mapping has already been removed.
433  */
434 int
435 __pmap_pv_enter(pmap_t pmap, struct vm_page *pg, vaddr_t va, vm_prot_t prot)
436 {
437 	struct vm_page_md *pvh;
438 	struct pv_entry *pv;
439 	int s;
440 	int have_writeable = 0;
441 
442 	s = splvm();
443 	if (SH_HAS_VIRTUAL_ALIAS) {
444 		/* Remove all other mapping on this physical page */
445 		pvh = &pg->mdpage;
446 		if (prot & PROT_WRITE)
447 			have_writeable = 1;
448 		else {
449 			SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
450 				if (pv->pv_prot & PROT_WRITE) {
451 					have_writeable = 1;
452 					break;
453 				}
454 			}
455 		}
456 		if (have_writeable != 0) {
457 			while ((pv = SLIST_FIRST(&pvh->pvh_head)) != NULL)
458 				pmap_remove(pv->pv_pmap, pv->pv_va,
459 				    pv->pv_va + PAGE_SIZE);
460 		}
461 	}
462 
463 	/* Register pv map */
464 	pvh = &pg->mdpage;
465 	pv = __pmap_pv_alloc();
466 	if (pv == NULL) {
467 		splx(s);
468 		return (ENOMEM);
469 	}
470 
471 	pv->pv_pmap = pmap;
472 	pv->pv_va = va;
473 	pv->pv_prot = prot;
474 
475 	SLIST_INSERT_HEAD(&pvh->pvh_head, pv, pv_link);
476 	splx(s);
477 	return (0);
478 }
479 
480 void
481 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
482 {
483 	struct vm_page *pg;
484 	pt_entry_t *pte, entry;
485 	vaddr_t va;
486 
487 	KDASSERT((sva & PGOFSET) == 0);
488 
489 	for (va = sva; va < eva; va += PAGE_SIZE) {
490 		if ((pte = __pmap_pte_lookup(pmap, va)) == NULL ||
491 		    (entry = *pte) == 0)
492 			continue;
493 
494 		if ((pg = PHYS_TO_VM_PAGE(entry & PG_PPN)) != NULL)
495 			__pmap_pv_remove(pmap, pg, va);
496 
497 		if (entry & _PG_WIRED)
498 			pmap->pm_stats.wired_count--;
499 		pmap->pm_stats.resident_count--;
500 		*pte = 0;
501 
502 		/*
503 		 * When pmap->pm_asid == -1 (invalid ASID), the old TLB entries
504 		 * for this pmap have already been removed by pmap_activate().
505 		 */
506 		if (pmap->pm_asid != -1)
507 			sh_tlb_invalidate_addr(pmap->pm_asid, va);
508 	}
509 }
510 
511 /*
512  * void __pmap_pv_remove(pmap_t pmap, struct vm_page *pg, vaddr_t vaddr):
513  *	Remove the physical-virtual mapping from the vm_page.
514  */
515 void
516 __pmap_pv_remove(pmap_t pmap, struct vm_page *pg, vaddr_t vaddr)
517 {
518 	struct vm_page_md *pvh;
519 	struct pv_entry *pv;
520 	int s;
521 
522 	s = splvm();
523 	pvh = &pg->mdpage;
524 	SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
525 		if (pv->pv_pmap == pmap && pv->pv_va == vaddr) {
526 			if (SH_HAS_VIRTUAL_ALIAS ||
527 			    (SH_HAS_WRITEBACK_CACHE &&
528 				(pg->pg_flags & PG_PMAP_MOD))) {
529 				/*
530 				 * Always use index ops. since I don't want to
531 				 * worry about address space.
532 				 */
533 				sh_dcache_wbinv_range_index
534 				    (pv->pv_va, PAGE_SIZE);
535 			}
536 
537 			SLIST_REMOVE(&pvh->pvh_head, pv, pv_entry, pv_link);
538 			__pmap_pv_free(pv);
539 			break;
540 		}
541 	}
542 #ifdef DEBUG
543 	/* Check duplicated map. */
544 	SLIST_FOREACH(pv, &pvh->pvh_head, pv_link)
545 	    KDASSERT(!(pv->pv_pmap == pmap && pv->pv_va == vaddr));
546 #endif
547 	splx(s);
548 }
549 
550 void
551 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
552 {
553 	pt_entry_t *pte, entry;
554 
555 	KDASSERT((va & PGOFSET) == 0);
556 	KDASSERT(va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS);
557 
558 	entry = (pa & PG_PPN) | PG_V | PG_SH | PG_4K;
559 	if (prot & PROT_WRITE)
560 		entry |= (PG_PR_KRW | PG_D);
561 	else
562 		entry |= PG_PR_KRO;
563 
564 	if (PHYS_TO_VM_PAGE(pa))
565 		entry |= PG_C;
566 
567 	pte = __pmap_kpte_lookup(va);
568 
569 	KDASSERT(*pte == 0);
570 	*pte = entry;
571 
572 	sh_tlb_update(0, va, entry);
573 }
574 
575 void
576 pmap_kremove(vaddr_t va, vsize_t len)
577 {
578 	pt_entry_t *pte;
579 	vaddr_t eva = va + len;
580 
581 	KDASSERT((va & PGOFSET) == 0);
582 	KDASSERT((len & PGOFSET) == 0);
583 	KDASSERT(va >= VM_MIN_KERNEL_ADDRESS && eva <= VM_MAX_KERNEL_ADDRESS);
584 
585 	for (; va < eva; va += PAGE_SIZE) {
586 		pte = __pmap_kpte_lookup(va);
587 		KDASSERT(pte != NULL);
588 		if (*pte == 0)
589 			continue;
590 
591 		if (SH_HAS_VIRTUAL_ALIAS && PHYS_TO_VM_PAGE(*pte & PG_PPN))
592 			sh_dcache_wbinv_range(va, PAGE_SIZE);
593 		*pte = 0;
594 
595 		sh_tlb_invalidate_addr(0, va);
596 	}
597 }
598 
599 boolean_t
600 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
601 {
602 	pt_entry_t *pte;
603 
604 	/* handle P1 and P2 specially: va == pa */
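	/* ((va >> 30) == 2 covers 0x80000000-0xbfffffff, i.e. P1 and P2) */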
605 	if (pmap == pmap_kernel() && (va >> 30) == 2) {
606 		if (pap != NULL)
607 			*pap = va & SH3_PHYS_MASK;
608 		return (TRUE);
609 	}
610 
611 	pte = __pmap_pte_lookup(pmap, va);
612 	if (pte == NULL || *pte == 0)
613 		return (FALSE);
614 
615 	if (pap != NULL)
616 		*pap = (*pte & PG_PPN) | (va & PGOFSET);
617 
618 	return (TRUE);
619 }
620 
621 void
622 pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
623 {
624 	boolean_t kernel = pmap == pmap_kernel();
625 	pt_entry_t *pte, entry, protbits;
626 	vaddr_t va;
627 	paddr_t pa;
628 	struct vm_page *pg;
629 	struct vm_page_md *pvh;
630 	struct pv_entry *pv, *head;
631 
632 	sva = trunc_page(sva);
633 
634 	if ((prot & PROT_READ) == PROT_NONE) {
635 		pmap_remove(pmap, sva, eva);
636 		return;
637 	}
638 
639 	switch (prot) {
640 	default:
641 		panic("pmap_protect: invalid protection mode %x", prot);
642 		/* NOTREACHED */
643 	case PROT_READ:
644 		/* FALLTHROUGH */
645 	case PROT_READ | PROT_EXEC:
646 		protbits = kernel ? PG_PR_KRO : PG_PR_URO;
647 		break;
648 	case PROT_READ | PROT_WRITE:
649 		/* FALLTHROUGH */
650 	case PROT_MASK:
651 		protbits = kernel ? PG_PR_KRW : PG_PR_URW;
652 		break;
653 	}
654 
655 	for (va = sva; va < eva; va += PAGE_SIZE) {
656 
657 		if (((pte = __pmap_pte_lookup(pmap, va)) == NULL) ||
658 		    (entry = *pte) == 0)
659 			continue;
660 
661 		if (SH_HAS_VIRTUAL_ALIAS && (entry & PG_D)) {
662 			if (!SH_HAS_UNIFIED_CACHE && (prot & PROT_EXEC))
663 				sh_icache_sync_range_index(va, PAGE_SIZE);
664 			else
665 				sh_dcache_wbinv_range_index(va, PAGE_SIZE);
666 		}
667 
668 		entry = (entry & ~PG_PR_MASK) | protbits;
669 		*pte = entry;
670 
671 		if (pmap->pm_asid != -1)
672 			sh_tlb_update(pmap->pm_asid, va, entry);
673 
674 		pa = entry & PG_PPN;
675 		pg = PHYS_TO_VM_PAGE(pa);
676 		if (pg == NULL)
677 			continue;
678 		pvh = &pg->mdpage;
679 
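		/*
		 * Drop every other mapping of this page; only the mapping
		 * being changed here is kept on the pv list.
		 */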
680 		while ((pv = SLIST_FIRST(&pvh->pvh_head)) != NULL) {
681 			if (pv->pv_pmap == pmap && pv->pv_va == va) {
682 				break;
683 			}
684 			pmap_remove(pv->pv_pmap, pv->pv_va,
685 			    pv->pv_va + PAGE_SIZE);
686 		}
687 		/* the matching pv is first in the list */
688 		SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
689 			if (pv->pv_pmap == pmap && pv->pv_va == va) {
690 				pv->pv_prot = prot;
691 				break;
692 			}
693 		}
694 		/* remove the rest of the elements */
695 		head = SLIST_FIRST(&pvh->pvh_head);
696 		if (head != NULL)
697 			while ((pv = SLIST_NEXT(head, pv_link)) != NULL)
698 				pmap_remove(pv->pv_pmap, pv->pv_va,
699 				    pv->pv_va + PAGE_SIZE);
700 	}
701 }
702 
703 void
704 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
705 {
706 	struct vm_page_md *pvh = &pg->mdpage;
707 	struct pv_entry *pv;
708 	struct pmap *pmap;
709 	vaddr_t va;
710 	int s;
711 
712 	switch (prot) {
713 	case PROT_READ | PROT_WRITE:
714 		/* FALLTHROUGH */
715 	case PROT_MASK:
716 		break;
717 
718 	case PROT_READ:
719 		/* FALLTHROUGH */
720 	case PROT_READ | PROT_EXEC:
721 		s = splvm();
722 		SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
723 			pmap = pv->pv_pmap;
724 			va = pv->pv_va;
725 
726 			KDASSERT(pmap);
727 			pmap_protect(pmap, va, va + PAGE_SIZE, prot);
728 		}
729 		splx(s);
730 		break;
731 
732 	default:
733 		/* Remove all */
734 		s = splvm();
735 		while ((pv = SLIST_FIRST(&pvh->pvh_head)) != NULL) {
736 			va = pv->pv_va;
737 			pmap_remove(pv->pv_pmap, va, va + PAGE_SIZE);
738 		}
739 		splx(s);
740 	}
741 }
742 
743 void
744 pmap_unwire(pmap_t pmap, vaddr_t va)
745 {
746 	pt_entry_t *pte, entry;
747 
748 	if ((pte = __pmap_pte_lookup(pmap, va)) == NULL ||
749 	    (entry = *pte) == 0 ||
750 	    (entry & _PG_WIRED) == 0)
751 		return;
752 
753 	*pte = entry & ~_PG_WIRED;
754 	pmap->pm_stats.wired_count--;
755 }
756 
757 void
758 pmap_proc_iflush(struct process *pr, vaddr_t va, vsize_t len)
759 {
760 	if (!SH_HAS_UNIFIED_CACHE)
761 		sh_icache_sync_range_index(va, len);
762 }
763 
764 void
765 pmap_zero_page(vm_page_t pg)
766 {
767 	paddr_t phys = VM_PAGE_TO_PHYS(pg);
768 
769 	if (SH_HAS_VIRTUAL_ALIAS) {	/* don't pollute cache */
770 		/* sync cache since we access via P2. */
771 		sh_dcache_wbinv_all();
772 		memset((void *)SH3_PHYS_TO_P2SEG(phys), 0, PAGE_SIZE);
773 	} else {
774 		memset((void *)SH3_PHYS_TO_P1SEG(phys), 0, PAGE_SIZE);
775 	}
776 }
777 
778 void
779 pmap_copy_page(vm_page_t srcpg, vm_page_t dstpg)
780 {
781 	paddr_t src, dst;
782 
783 	src = VM_PAGE_TO_PHYS(srcpg);
784 	dst = VM_PAGE_TO_PHYS(dstpg);
785 
786 	if (SH_HAS_VIRTUAL_ALIAS) {	/* don't pollute cache */
787 		/* sync cache since we access via P2. */
788 		sh_dcache_wbinv_all();
789 		memcpy((void *)SH3_PHYS_TO_P2SEG(dst),
790 		    (void *)SH3_PHYS_TO_P2SEG(src), PAGE_SIZE);
791 	} else {
792 		memcpy((void *)SH3_PHYS_TO_P1SEG(dst),
793 		    (void *)SH3_PHYS_TO_P1SEG(src), PAGE_SIZE);
794 	}
795 }
796 
797 boolean_t
798 pmap_is_referenced(struct vm_page *pg)
799 {
800 	return ((pg->pg_flags & PG_PMAP_REF) ? TRUE : FALSE);
801 }
802 
803 boolean_t
804 pmap_clear_reference(struct vm_page *pg)
805 {
806 	struct vm_page_md *pvh = &pg->mdpage;
807 	struct pv_entry *pv;
808 	pt_entry_t *pte;
809 	pmap_t pmap;
810 	vaddr_t va;
811 	int s;
812 
813 	if ((pg->pg_flags & PG_PMAP_REF) == 0)
814 		return (FALSE);
815 
816 	atomic_clearbits_int(&pg->pg_flags, PG_PMAP_REF);
817 
818 	s = splvm();
819 	/* Restart reference bit emulation */
820 	SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
821 		pmap = pv->pv_pmap;
822 		va = pv->pv_va;
823 
824 		if ((pte = __pmap_pte_lookup(pmap, va)) == NULL)
825 			continue;
826 		if ((*pte & PG_V) == 0)
827 			continue;
828 		*pte &= ~PG_V;
829 
830 		if (pmap->pm_asid != -1)
831 			sh_tlb_invalidate_addr(pmap->pm_asid, va);
832 	}
833 	splx(s);
834 
835 	return (TRUE);
836 }
837 
838 boolean_t
839 pmap_is_modified(struct vm_page *pg)
840 {
841 	return ((pg->pg_flags & PG_PMAP_MOD) ? TRUE : FALSE);
842 }
843 
844 boolean_t
845 pmap_clear_modify(struct vm_page *pg)
846 {
847 	struct vm_page_md *pvh = &pg->mdpage;
848 	struct pv_entry *pv;
849 	struct pmap *pmap;
850 	pt_entry_t *pte, entry;
851 	boolean_t modified;
852 	vaddr_t va;
853 	int s;
854 
855 	modified = pg->pg_flags & PG_PMAP_MOD;
856 	if (!modified)
857 		return (FALSE);
858 
859 	atomic_clearbits_int(&pg->pg_flags, PG_PMAP_MOD);
860 
861 	s = splvm();
862 	if (SLIST_EMPTY(&pvh->pvh_head)) {/* no map on this page */
863 		splx(s);
864 		return (TRUE);
865 	}
866 
867 	/* Write back the cache and invalidate TLB entries */
868 	if (!SH_HAS_VIRTUAL_ALIAS && SH_HAS_WRITEBACK_CACHE)
869 		sh_dcache_wbinv_all();
870 
871 	SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
872 		pmap = pv->pv_pmap;
873 		va = pv->pv_va;
874 		if ((pte = __pmap_pte_lookup(pmap, va)) == NULL)
875 			continue;
876 		entry = *pte;
877 		if ((entry & PG_D) == 0)
878 			continue;
879 
880 		if (SH_HAS_VIRTUAL_ALIAS)
881 			sh_dcache_wbinv_range_index(va, PAGE_SIZE);
882 
883 		*pte = entry & ~PG_D;
884 		if (pmap->pm_asid != -1)
885 			sh_tlb_invalidate_addr(pmap->pm_asid, va);
886 	}
887 	splx(s);
888 
889 	return (TRUE);
890 }
891 
892 #ifdef SH4
893 /*
894  * pmap_prefer_align()
895  *
896  * Return virtual cache alignment.
897  */
898 vaddr_t
899 pmap_prefer_align(void)
900 {
901 	return SH_HAS_VIRTUAL_ALIAS ? sh_cache_prefer_mask + 1 : 0;
902 }
903 
904 /*
905  * pmap_prefer_offset(vaddr_t of)
906  *
907  * Calculate offset in virtual cache.
908  */
909 vaddr_t
910 pmap_prefer_offset(vaddr_t of)
911 {
912 	return of & (SH_HAS_VIRTUAL_ALIAS ? sh_cache_prefer_mask : 0);
913 }
914 #endif /* SH4 */
915 
916 /*
917  * pv_entry pool allocator:
918  *	void *__pmap_pv_page_alloc(struct pool *pool, int flags, int *slowdown):
919  *	void __pmap_pv_page_free(struct pool *pool, void *v):
920  */
921 void *
922 __pmap_pv_page_alloc(struct pool *pool, int flags, int *slowdown)
923 {
924 	struct vm_page *pg;
925 
926 	*slowdown = 0;
927 	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
928 	if (pg == NULL)
929 		return (NULL);
930 
931 	return ((void *)SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg)));
932 }
933 
934 void
935 __pmap_pv_page_free(struct pool *pool, void *v)
936 {
937 	vaddr_t va = (vaddr_t)v;
938 
939 	/* Invalidate cache for next use of this page */
940 	if (SH_HAS_VIRTUAL_ALIAS)
941 		sh_dcache_inv_range(va, PAGE_SIZE);
942 	uvm_pagefree(PHYS_TO_VM_PAGE(SH3_P1SEG_TO_PHYS(va)));
943 }
944 
945 /*
946  * pt_entry_t *__pmap_pte_alloc(pmap_t pmap, vaddr_t va):
947  *	Look up a page table entry; if the page table page is not yet
948  *	allocated, allocate it.  Page tables are accessed via P1.
949  */
950 pt_entry_t *
951 __pmap_pte_alloc(pmap_t pmap, vaddr_t va)
952 {
953 	struct vm_page *pg;
954 	pt_entry_t *ptp, *pte;
955 
956 	if ((pte = __pmap_pte_lookup(pmap, va)) != NULL)
957 		return (pte);
958 
959 	/* Allocate page table (not managed page) */
960 	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE | UVM_PGA_ZERO);
961 	if (pg == NULL)
962 		return NULL;
963 
964 	ptp = (pt_entry_t *)SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg));
965 	pmap->pm_ptp[__PMAP_PTP_INDEX(va)] = ptp;
966 
967 	return (ptp + __PMAP_PTP_OFSET(va));
968 }
969 
970 /*
971  * pt_entry_t *__pmap_pte_lookup(pmap_t pmap, vaddr_t va):
972  *	Look up a page table entry; returns NULL if the page table page is not allocated.
973  */
974 pt_entry_t *
975 __pmap_pte_lookup(pmap_t pmap, vaddr_t va)
976 {
977 	pt_entry_t *ptp;
978 
979 	if (pmap == pmap_kernel())
980 		return (__pmap_kpte_lookup(va));
981 
982 	/* Lookup page table page */
983 	ptp = pmap->pm_ptp[__PMAP_PTP_INDEX(va)];
984 	if (ptp == NULL)
985 		return (NULL);
986 
987 	return (ptp + __PMAP_PTP_OFSET(va));
988 }
989 
990 /*
991  * pt_entry_t *__pmap_kpte_lookup(vaddr_t va):
992  *	kernel virtual only version of __pmap_pte_lookup().
993  */
994 pt_entry_t *
995 __pmap_kpte_lookup(vaddr_t va)
996 {
997 	pt_entry_t *ptp;
998 
999 	ptp =
1000 	    __pmap_kernel.pm_ptp[__PMAP_PTP_INDEX(va - VM_MIN_KERNEL_ADDRESS)];
1001 	return (ptp ? ptp + __PMAP_PTP_OFSET(va) : NULL);
1002 }
1003 
1004 /*
1005  * boolean_t __pmap_pte_load(pmap_t pmap, vaddr_t va, int flags):
1006  *	Look up the page table entry and, if found, load it into the TLB.
1007  *	"flags" selects whether to emulate the referenced and/or modified bit.
1008  */
1009 boolean_t
1010 __pmap_pte_load(pmap_t pmap, vaddr_t va, int flags)
1011 {
1012 	struct vm_page *pg;
1013 	pt_entry_t *pte;
1014 	pt_entry_t entry;
1015 
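	/*
	 * Kernel virtual addresses (P1/P2/P3) have the top bit set, so a
	 * negative (int)va must belong to the kernel pmap and a positive
	 * one to a user pmap.
	 */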
1016 	KDASSERT((((int)va < 0) && (pmap == pmap_kernel())) ||
1017 	    (((int)va >= 0) && (pmap != pmap_kernel())));
1018 
1019 	/* Lookup page table entry */
1020 	if (((pte = __pmap_pte_lookup(pmap, va)) == NULL) ||
1021 	    ((entry = *pte) == 0))
1022 		return (FALSE);
1023 
1024 	KDASSERT(va != 0);
1025 
1026 	/* Emulate reference/modified tracking for managed page. */
1027 	if (flags != 0 && (pg = PHYS_TO_VM_PAGE(entry & PG_PPN)) != NULL) {
1028 		if (flags & PG_PMAP_REF)
1029 			entry |= PG_V;
1030 		if (flags & PG_PMAP_MOD)
1031 			entry |= PG_D;
1032 		atomic_setbits_int(&pg->pg_flags, flags);
1033 		*pte = entry;
1034 	}
1035 
1036 	/* When pmap has valid ASID, register to TLB */
1037 	if (pmap->pm_asid != -1)
1038 		sh_tlb_update(pmap->pm_asid, va, entry);
1039 
1040 	return (TRUE);
1041 }
1042 
1043 /*
1044  * int __pmap_asid_alloc(void):
1045  *	Allocate a new ASID.  If all ASIDs are in use, steal one from another process.
1046  */
1047 int
1048 __pmap_asid_alloc(void)
1049 {
1050 	struct process *pr;
1051 	int i, j, k, n, map, asid;
1052 
1053 	/* Search free ASID */
1054 	i = __pmap_asid.hint >> 5;
1055 	n = i + 8;
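	/* Scan all 8 bitmap words, starting at the hint's word and wrapping via "& 0x7". */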
1056 	for (; i < n; i++) {
1057 		k = i & 0x7;
1058 		map = __pmap_asid.map[k];
1059 		for (j = 0; j < 32; j++) {
1060 			if ((map & (1 << j)) == 0 && (k + j) != 0) {
1061 				__pmap_asid.map[k] |= (1 << j);
1062 				__pmap_asid.hint = (k << 5) + j;
1063 				return (__pmap_asid.hint);
1064 			}
1065 		}
1066 	}
1067 
1068 	/* Steal ASID */
1069 	/*
1070 	 * XXX this always steals the ASID of the *newest* proc with one,
1071 	 * so it's far from LRU but rather almost pessimal once you have
1072 	 * too many processes.
1073 	 */
1074 	LIST_FOREACH(pr, &allprocess, ps_list) {
1075 		pmap_t pmap = pr->ps_vmspace->vm_map.pmap;
1076 
1077 		if ((asid = pmap->pm_asid) > 0) {
1078 			pmap->pm_asid = -1;
1079 			__pmap_asid.hint = asid;
1080 			/* Invalidate all old ASID entry */
1081 			sh_tlb_invalidate_asid(asid);
1082 
1083 			return (__pmap_asid.hint);
1084 		}
1085 	}
1086 
1087 	panic("No ASID allocated.");
1088 	/* NOTREACHED */
1089 }
1090 
1091 /*
1092  * void __pmap_asid_free(int asid):
1093  *	Return an unused ASID to the pool and remove all its TLB entries.
1094  */
1095 void
1096 __pmap_asid_free(int asid)
1097 {
1098 	int i;
1099 
1100 	if (asid < 1)	/* Don't invalidate kernel ASID 0 */
1101 		return;
1102 
1103 	sh_tlb_invalidate_asid(asid);
1104 
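	/* Clear the ASID's bit: word asid / 32, bit asid % 32. */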
1105 	i = asid >> 5;
1106 	__pmap_asid.map[i] &= ~(1 << (asid - (i << 5)));
1107 }
1108 
1109 /*
1110  * Routines used by PMAP_MAP_DIRECT() and PMAP_UNMAP_DIRECT() to provide
1111  * directly-translated pages.
1112  *
1113  * Because of cache virtual aliases, it is necessary to evict these pages
1114  * from the cache, when `unmapping' them (as they might be reused by a
1115  * different allocator). We also rely upon all users of pages to either
1116  * use them with pmap_enter()/pmap_remove(), to enforce proper cache handling,
1117  * or to invoke sh_dcache_inv_range() themselves, as done for page tables.
1118  */
1119 vaddr_t
1120 pmap_map_direct(vm_page_t pg)
1121 {
1122 	return SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg));
1123 }
1124 
1125 vm_page_t
1126 pmap_unmap_direct(vaddr_t va)
1127 {
1128 	paddr_t pa = SH3_P1SEG_TO_PHYS(va);
1129 	vm_page_t pg = PHYS_TO_VM_PAGE(pa);
1130 
1131 	if (SH_HAS_VIRTUAL_ALIAS)
1132 		sh_dcache_inv_range(va, PAGE_SIZE);
1133 
1134 	return pg;
1135 }
1136