1 /*	$NetBSD: pmap.c,v 1.109 2022/10/05 09:03:06 rin Exp $	*/
2 
3 /*
4  * Copyright 2001 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *      This product includes software developed for the NetBSD Project by
20  *      Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*
39  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
40  * Copyright (C) 1995, 1996 TooLs GmbH.
41  * All rights reserved.
42  *
43  * Redistribution and use in source and binary forms, with or without
44  * modification, are permitted provided that the following conditions
45  * are met:
46  * 1. Redistributions of source code must retain the above copyright
47  *    notice, this list of conditions and the following disclaimer.
48  * 2. Redistributions in binary form must reproduce the above copyright
49  *    notice, this list of conditions and the following disclaimer in the
50  *    documentation and/or other materials provided with the distribution.
51  * 3. All advertising materials mentioning features or use of this software
52  *    must display the following acknowledgement:
53  *	This product includes software developed by TooLs GmbH.
54  * 4. The name of TooLs GmbH may not be used to endorse or promote products
55  *    derived from this software without specific prior written permission.
56  *
57  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
58  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
59  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
60  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
61  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
62  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
63  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
64  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
65  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
66  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
67  */
68 
69 #include <sys/cdefs.h>
70 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.109 2022/10/05 09:03:06 rin Exp $");
71 
72 #ifdef _KERNEL_OPT
73 #include "opt_ddb.h"
74 #include "opt_pmap.h"
75 #endif
76 
77 #include <sys/param.h>
78 #include <sys/cpu.h>
79 #include <sys/device.h>
80 #include <sys/kmem.h>
81 #include <sys/pool.h>
82 #include <sys/proc.h>
83 #include <sys/queue.h>
84 #include <sys/systm.h>
85 
86 #include <uvm/uvm.h>
87 
88 #include <machine/powerpc.h>
89 
90 #include <powerpc/pcb.h>
91 
92 #include <powerpc/spr.h>
93 #include <powerpc/ibm4xx/spr.h>
94 
95 #include <powerpc/ibm4xx/cpu.h>
96 #include <powerpc/ibm4xx/tlb.h>
97 
98 /*
99  * kernmap is an array of PTEs large enough to map in
100  * 4GB.  At 16KB/page it is 256K entries or 2MB.
101  */
102 #define KERNMAP_SIZE	((0xffffffffU / PAGE_SIZE) + 1)
103 void *kernmap;
104 
105 #define MINCTX		2
106 #define NUMCTX		256
107 
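/*
 * ctxbusy[] maps each MMU context (TLB PID) to the pmap that currently
 * owns it; contexts below MINCTX (0 and 1) are never handed out to
 * user pmaps.
 */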
108 volatile struct pmap *ctxbusy[NUMCTX];
109 
110 #define TLBF_USED	0x1
111 #define	TLBF_REF	0x2
112 #define	TLBF_LOCKED	0x4
113 #define	TLB_LOCKED(i)	(tlb_info[(i)].ti_flags & TLBF_LOCKED)
114 
115 typedef struct tlb_info_s {
116 	char	ti_flags;
116 	char	ti_ctx;		/* TLB_PID associated with the entry */
118 	u_int	ti_va;
119 } tlb_info_t;
120 
121 volatile tlb_info_t tlb_info[NTLB];
122 /* We'll use a modified FIFO replacement policy because it's cheap */
123 volatile int tlbnext;
124 
125 static int tlb_nreserved = 0;
126 static int pmap_bootstrap_done = 0;
127 
128 /* Event counters */
129 struct evcnt tlbmiss_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
130     NULL, "cpu", "tlbmiss");
131 struct evcnt tlbflush_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
132     NULL, "cpu", "tlbflush");
133 struct evcnt tlbenter_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
134     NULL, "cpu", "tlbenter");
135 EVCNT_ATTACH_STATIC(tlbmiss_ev);
136 EVCNT_ATTACH_STATIC(tlbflush_ev);
137 EVCNT_ATTACH_STATIC(tlbenter_ev);
138 
139 struct pmap kernel_pmap_;
140 struct pmap *const kernel_pmap_ptr = &kernel_pmap_;
141 
142 static int npgs;
143 static u_int nextavail;
144 #ifndef MSGBUFADDR
145 extern paddr_t msgbuf_paddr;
146 #endif
147 
148 static struct mem_region *mem, *avail;
149 
150 /*
151  * This is a cache of referenced/modified bits.
152  * Bits herein are shifted by ATTRSHFT.
153  */
154 static char *pmap_attrib;
155 
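/*
 * The low bit of each pv_va encodes whether the mapping is wired;
 * PV_VA() strips it and PV_CMPVA() compares addresses while ignoring it.
 */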
156 #define PV_WIRED	0x1
157 #define PV_WIRE(pv)	((pv)->pv_va |= PV_WIRED)
158 #define PV_UNWIRE(pv)	((pv)->pv_va &= ~PV_WIRED)
159 #define PV_ISWIRED(pv)	((pv)->pv_va & PV_WIRED)
160 #define PV_VA(pv)	((pv)->pv_va & ~PV_WIRED)
161 #define PV_CMPVA(va,pv)	(!(PV_VA(pv) ^ (va)))
162 
163 struct pv_entry {
164 	struct pv_entry *pv_next;	/* Linked list of mappings */
165 	struct pmap *pv_pm;
166 	vaddr_t pv_va;			/* virtual address of mapping */
167 };
168 
169 /* Each index corresponds to TLB_SIZE_* value. */
170 static size_t tlbsize[] = {
171 	1024, 		/* TLB_SIZE_1K */
172 	4096, 		/* TLB_SIZE_4K */
173 	16384, 		/* TLB_SIZE_16K */
174 	65536, 		/* TLB_SIZE_64K */
175 	262144, 	/* TLB_SIZE_256K */
176 	1048576, 	/* TLB_SIZE_1M */
177 	4194304, 	/* TLB_SIZE_4M */
178 	16777216, 	/* TLB_SIZE_16M */
179 };
180 
181 struct pv_entry *pv_table;
182 static struct pool pv_pool;
183 
184 static int pmap_initialized;
185 
186 static void ctx_flush(int);
187 
188 struct pv_entry *pa_to_pv(paddr_t);
189 static inline char *pa_to_attr(paddr_t);
190 
191 static inline volatile u_int *pte_find(struct pmap *, vaddr_t);
192 static inline int pte_enter(struct pmap *, vaddr_t, u_int);
193 
194 static inline int pmap_enter_pv(struct pmap *, vaddr_t, paddr_t, int);
195 static void pmap_remove_pv(struct pmap *, vaddr_t, paddr_t);
196 
197 static inline void tlb_invalidate_entry(int);
198 
199 static int ppc4xx_tlb_size_mask(size_t, int *, int *);
200 
201 
202 struct pv_entry *
203 pa_to_pv(paddr_t pa)
204 {
205 	uvm_physseg_t bank;
206 	psize_t pg;
207 
208 	bank = uvm_physseg_find(atop(pa), &pg);
209 	if (bank == UVM_PHYSSEG_TYPE_INVALID)
210 		return NULL;
211 	return &uvm_physseg_get_pmseg(bank)->pvent[pg];
212 }
213 
214 static inline char *
215 pa_to_attr(paddr_t pa)
216 {
217 	uvm_physseg_t bank;
218 	psize_t pg;
219 
220 	bank = uvm_physseg_find(atop(pa), &pg);
221 	if (bank == UVM_PHYSSEG_TYPE_INVALID)
222 		return NULL;
223 	return &uvm_physseg_get_pmseg(bank)->attrs[pg];
224 }
225 
226 /*
227  * Insert PTE into page table.
228  */
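/*
 * Per-pmap page tables are two-level: STIDX(va) selects a segment
 * pointer in pm_ptbl[], which points to a page of PTEs indexed by
 * PTIDX(va).  Segment pages are allocated lazily in pte_enter().
 */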
229 static inline int
230 pte_enter(struct pmap *pm, vaddr_t va, u_int pte)
231 {
232 	int seg = STIDX(va), ptn = PTIDX(va);
233 	u_int oldpte;
234 
235 	if (!pm->pm_ptbl[seg]) {
236 		/* Don't allocate a page to clear a non-existent mapping. */
237 		if (!pte)
238 			return 0;
239 
240 		vaddr_t km = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
241 		    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_NOWAIT);
242 
243 		if (__predict_false(km == 0))
244 			return ENOMEM;
245 
246 		pm->pm_ptbl[seg] = (u_int *)km;
247 	}
248 	oldpte = pm->pm_ptbl[seg][ptn];
249 	pm->pm_ptbl[seg][ptn] = pte;
250 
251 	/* Flush entry. */
252 	ppc4xx_tlb_flush(va, pm->pm_ctx);
253 	if (oldpte != pte) {
254 		if (pte == 0)
255 			pm->pm_stats.resident_count--;
256 		else
257 			pm->pm_stats.resident_count++;
258 	}
259 	return 0;
260 }
261 
262 /*
263  * Get a pointer to a PTE in a page table.
264  */
265 volatile u_int *
266 pte_find(struct pmap *pm, vaddr_t va)
267 {
268 	int seg = STIDX(va), ptn = PTIDX(va);
269 
270 	if (pm->pm_ptbl[seg])
271 		return &pm->pm_ptbl[seg][ptn];
272 
273 	return NULL;
274 }
275 
276 /*
277  * This is called during initppc, before the system is really initialized.
278  */
279 void
280 pmap_bootstrap(u_int kernelstart, u_int kernelend)
281 {
282 	struct mem_region *mp, *mp1;
283 	int cnt, i;
284 	u_int s, e, sz;
285 
286 	tlbnext = tlb_nreserved;
287 
288 	/*
289 	 * Allocate the kernel page table at the end of
290 	 * kernel space so it's in the locked TTE.
291 	 */
292 	kernmap = (void *)kernelend;
293 
294 	/*
295 	 * Initialize kernel page table.
296 	 */
297 	for (i = 0; i < STSZ; i++)
298 		pmap_kernel()->pm_ptbl[i] = NULL;
299 	ctxbusy[0] = ctxbusy[1] = pmap_kernel();
300 
301 	/*
302 	 * Announce page-size to the VM-system
303 	 */
304 	uvmexp.pagesize = NBPG;
305 	uvm_md_init();
306 
307 	/*
308 	 * Get memory.
309 	 */
310 	mem_regions(&mem, &avail);
311 	for (mp = mem; mp->size; mp++) {
312 		physmem += btoc(mp->size);
313 		printf("+%lx,", mp->size);
314 	}
315 	printf("\n");
316 	ppc4xx_tlb_init();
317 	/*
318 	 * Count the number of available entries.
319 	 */
320 	for (cnt = 0, mp = avail; mp->size; mp++)
321 		cnt++;
322 
323 	/*
324 	 * Page align all regions.
325 	 * Non-page aligned memory isn't very interesting to us.
326 	 * Also, sort the entries for ascending addresses.
327 	 */
328 	kernelstart &= ~PGOFSET;
329 	kernelend = (kernelend + PGOFSET) & ~PGOFSET;
330 	for (mp = avail; mp->size; mp++) {
331 		s = mp->start;
332 		e = mp->start + mp->size;
333 		printf("%08x-%08x -> ", s, e);
334 		/*
335 		 * Check whether this region holds all of the kernel.
336 		 */
337 		if (s < kernelstart && e > kernelend) {
338 			avail[cnt].start = kernelend;
339 			avail[cnt++].size = e - kernelend;
340 			e = kernelstart;
341 		}
342 		/*
343 		 * Look whether this region starts within the kernel.
344 		 */
345 		if (s >= kernelstart && s < kernelend) {
346 			if (e <= kernelend)
347 				goto empty;
348 			s = kernelend;
349 		}
350 		/*
351 		 * Now look whether this region ends within the kernel.
352 		 */
353 		if (e > kernelstart && e <= kernelend) {
354 			if (s >= kernelstart)
355 				goto empty;
356 			e = kernelstart;
357 		}
358 		/*
359 		 * Now page align the start and size of the region.
360 		 */
361 		s = round_page(s);
362 		e = trunc_page(e);
363 		if (e < s)
364 			e = s;
365 		sz = e - s;
366 		printf("%08x-%08x = %x\n", s, e, sz);
367 		/*
368 		 * Check whether some memory is left here.
369 		 */
370 		if (sz == 0) {
371  empty:
372 			memmove(mp, mp + 1,
373 			    (cnt - (mp - avail)) * sizeof(*mp));
374 			cnt--;
375 			mp--;
376 			continue;
377 		}
378 		/*
379 		 * Do an insertion sort.
380 		 */
381 		npgs += btoc(sz);
382 		for (mp1 = avail; mp1 < mp; mp1++)
383 			if (s < mp1->start)
384 				break;
385 		if (mp1 < mp) {
386 			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
387 			mp1->start = s;
388 			mp1->size = sz;
389 		} else {
390 			mp->start = s;
391 			mp->size = sz;
392 		}
393 	}
394 
395 	/*
396 	 * We cannot do pmap_steal_memory here,
397 	 * since we don't run with translation enabled yet.
398 	 */
399 #ifndef MSGBUFADDR
400 	/*
401 	 * allow for msgbuf
402 	 */
403 	sz = round_page(MSGBUFSIZE);
404 	mp = NULL;
405 	for (mp1 = avail; mp1->size; mp1++)
406 		if (mp1->size >= sz)
407 			mp = mp1;
408 	if (mp == NULL)
409 		panic("not enough memory?");
410 
411 	npgs -= btoc(sz);
412 	msgbuf_paddr = mp->start + mp->size - sz;
413 	mp->size -= sz;
414 	if (mp->size <= 0)
415 		memmove(mp, mp + 1, (cnt - (mp - avail)) * sizeof(*mp));
416 #endif
417 
418 	for (mp = avail; mp->size; mp++)
419 		uvm_page_physload(atop(mp->start), atop(mp->start + mp->size),
420 		    atop(mp->start), atop(mp->start + mp->size),
421 		    VM_FREELIST_DEFAULT);
422 
423 	/*
424 	 * Initialize kernel pmap and hardware.
425 	 */
426 	/* Set up the TLB PID allocator so it knows we are already using PID 1 */
427 	pmap_kernel()->pm_ctx = KERNEL_PID;
428 	nextavail = avail->start;
429 
430 	pmap_bootstrap_done = 1;
431 }
432 
433 /*
434  * Restrict given range to physical memory
435  *
436  * (Used by /dev/mem)
437  */
438 void
439 pmap_real_memory(paddr_t *start, psize_t *size)
440 {
441 	struct mem_region *mp;
442 
443 	for (mp = mem; mp->size; mp++) {
444 		if (*start + *size > mp->start &&
445 		    *start < mp->start + mp->size) {
446 			if (*start < mp->start) {
447 				*size -= mp->start - *start;
448 				*start = mp->start;
449 			}
450 			if (*start + *size > mp->start + mp->size)
451 				*size = mp->start + mp->size - *start;
452 			return;
453 		}
454 	}
455 	*size = 0;
456 }
457 
458 /*
459  * Initialize anything else for pmap handling.
460  * Called during vm_init().
461  */
462 void
463 pmap_init(void)
464 {
465 	struct pv_entry *pv;
466 	vsize_t sz;
467 	vaddr_t addr;
468 	int bank, i, s;
469 	char *attr;
470 
471 	sz = (vsize_t)((sizeof(struct pv_entry) + 1) * npgs);
472 	sz = round_page(sz);
473 	addr = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
474 
475 	s = splvm();
476 
477 	pv = pv_table = (struct pv_entry *)addr;
478 	for (i = npgs; --i >= 0;)
479 		pv++->pv_pm = NULL;
480 	pmap_attrib = (char *)pv;
481 	memset(pv, 0, npgs);
482 
483 	pv = pv_table;
484 	attr = pmap_attrib;
485 	for (bank = uvm_physseg_get_first(); uvm_physseg_valid_p(bank);
486 	     bank = uvm_physseg_get_next(bank)) {
487 		sz = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
488 		uvm_physseg_get_pmseg(bank)->pvent = pv;
489 		uvm_physseg_get_pmseg(bank)->attrs = attr;
490 		pv += sz;
491 		attr += sz;
492 	}
493 
494 	pmap_initialized = 1;
495 
496 	splx(s);
497 
498 	/* Setup a pool for additional pvlist structures */
499 	pool_init(&pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pv_entry",
500 	    NULL, IPL_VM);
501 }
502 
503 /*
504  * How much virtual space is available to the kernel?
505  */
506 void
507 pmap_virtual_space(vaddr_t *start, vaddr_t *end)
508 {
509 
510 	*start = (vaddr_t) VM_MIN_KERNEL_ADDRESS;
511 	*end = (vaddr_t) VM_MAX_KERNEL_ADDRESS;
512 }
513 
514 #ifdef PMAP_GROWKERNEL
515 /*
516  * Preallocate kernel page tables to a specified VA.
517  * This simply loops through the first TTE for each
518  * page table from the beginning of the kernel pmap,
519  * reads the entry, and if the result is
520  * zero (either invalid entry or no page table) it stores
521  * a zero there, populating page tables in the process.
522  * This is not the most efficient technique but I don't
523  * expect it to be called that often.
524  */
525 extern struct vm_page *vm_page_alloc1(void);
526 extern void vm_page_free1(struct vm_page *);
527 
528 vaddr_t kbreak = VM_MIN_KERNEL_ADDRESS;
529 
530 vaddr_t
531 pmap_growkernel(vaddr_t maxkvaddr)
532 {
533 	struct pmap *pm = pmap_kernel();
534 	paddr_t pg;
535 	int seg, s;
536 
537 	s = splvm();
538 
539 	/* Align with the start of a page table */
540 	for (kbreak &= ~(PTMAP - 1); kbreak < maxkvaddr; kbreak += PTMAP) {
541 		seg = STIDX(kbreak);
542 
543 		if (pte_find(pm, kbreak))
544 			continue;
545 
546 		if (uvm.page_init_done)
547 			pg = (paddr_t)VM_PAGE_TO_PHYS(vm_page_alloc1());
548 		else if (!uvm_page_physget(&pg))
549 			panic("pmap_growkernel: no memory");
550 		if (!pg)
551 			panic("pmap_growkernel: no pages");
552 		pmap_zero_page((paddr_t)pg);
553 
554 		/* XXX This is based on all physmem being addressable */
555 		pm->pm_ptbl[seg] = (u_int *)pg;
556 	}
557 
558 	splx(s);
559 
560 	return kbreak;
561 }
562 
563 /*
564  *	vm_page_alloc1:
565  *
566  *	Allocate and return a memory cell with no associated object.
567  */
568 struct vm_page *
569 vm_page_alloc1(void)
570 {
571 	struct vm_page *pg;
572 
573 	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
574 	if (pg) {
575 		pg->wire_count = 1;	/* no mappings yet */
576 		pg->flags &= ~PG_BUSY;	/* never busy */
577 	}
578 	return pg;
579 }
580 
581 /*
582  *	vm_page_free1:
583  *
584  *	Returns the given page to the free list,
585  *	disassociating it with any VM object.
586  *
587  *	Object and page must be locked prior to entry.
588  */
589 void
590 vm_page_free1(struct vm_page *pg)
591 {
592 
593 	KASSERTMSG(pg->flags == (PG_CLEAN | PG_FAKE),
594 	    "invalid page pg = %p, pa = %" PRIxPADDR,
595 	    pg, VM_PAGE_TO_PHYS(pg));
596 
597 	pg->flags |= PG_BUSY;
598 	pg->wire_count = 0;
599 	uvm_pagefree(pg);
600 }
601 #endif
602 
603 /*
604  * Create and return a physical map.
605  */
606 struct pmap *
607 pmap_create(void)
608 {
609 	struct pmap *pm;
610 
611 	pm = kmem_alloc(sizeof(*pm), KM_SLEEP);
612 	memset(pm, 0, sizeof(*pm));
613 	pm->pm_refs = 1;
614 	return pm;
615 }
616 
617 /*
618  * Add a reference to the given pmap.
619  */
620 void
621 pmap_reference(struct pmap *pm)
622 {
623 
624 	pm->pm_refs++;
625 }
626 
627 /*
628  * Retire the given pmap from service.
629  * Should only be called if the map contains no valid mappings.
630  */
631 void
632 pmap_destroy(struct pmap *pm)
633 {
634 	int i;
635 
636 	if (--pm->pm_refs > 0)
637 		return;
638 	KASSERT(pm->pm_stats.resident_count == 0);
639 	KASSERT(pm->pm_stats.wired_count == 0);
640 	for (i = 0; i < STSZ; i++)
641 		if (pm->pm_ptbl[i]) {
642 			uvm_km_free(kernel_map, (vaddr_t)pm->pm_ptbl[i],
643 			    PAGE_SIZE, UVM_KMF_WIRED);
644 			pm->pm_ptbl[i] = NULL;
645 		}
646 	if (pm->pm_ctx)
647 		ctx_free(pm);
648 	kmem_free(pm, sizeof(*pm));
649 }
650 
651 /*
652  * Copy the range specified by src_addr/len
653  * from the source map to the range dst_addr/len
654  * in the destination map.
655  *
656  * This routine is only advisory and need not do anything.
657  */
658 void
659 pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vaddr_t dst_addr,
660 	  vsize_t len, vaddr_t src_addr)
661 {
662 }
663 
664 /*
665  * Require that all active physical maps contain no
666  * incorrect entries NOW.
667  */
668 void
669 pmap_update(struct pmap *pmap)
670 {
671 }
672 
673 /*
674  * Fill the given physical page with zeroes.
675  */
676 void
677 pmap_zero_page(paddr_t pa)
678 {
679 	int i;
680 
681 #ifdef PPC_4XX_NOCACHE
682 	memset((void *)pa, 0, PAGE_SIZE);
683 #else
684 
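	/*
	 * dcbz establishes each cache block zeroed without first reading
	 * memory, so the page is cleared one cache line at a time.
	 */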
685 	for (i = PAGE_SIZE/CACHELINESIZE; i > 0; i--) {
686 		__asm volatile ("dcbz 0,%0" : : "r" (pa));
687 		pa += CACHELINESIZE;
688 	}
689 #endif
690 }
691 
692 /*
693  * Copy the given physical source page to its destination.
694  */
695 void
696 pmap_copy_page(paddr_t src, paddr_t dst)
697 {
698 
699 	memcpy((void *)dst, (void *)src, PAGE_SIZE);
700 	dcache_wbinv_page(dst);
701 }
702 
703 static inline int
704 pmap_enter_pv(struct pmap *pm, vaddr_t va, paddr_t pa, int flags)
705 {
706 	struct pv_entry *pv, *npv;
707 	int s;
708 
709 	KASSERT(pmap_initialized);
710 
711 	s = splvm();
712 
713 	pv = pa_to_pv(pa);
714 	if (!pv->pv_pm) {
715 		/*
716 		 * No entries yet, use header as the first entry.
717 		 */
718 		pv->pv_va = va;
719 		pv->pv_pm = pm;
720 		pv->pv_next = NULL;
721 	} else {
722 		/*
723 		 * There is at least one other VA mapping this page.
724 		 * Place this entry after the header.
725 		 */
726 		npv = pool_get(&pv_pool, PR_NOWAIT);
727 		if (npv == NULL) {
728 			if ((flags & PMAP_CANFAIL) == 0)
729 				panic("pmap_enter_pv: failed");
730 			splx(s);
731 			return ENOMEM;
732 		}
733 		npv->pv_va = va;
734 		npv->pv_pm = pm;
735 		npv->pv_next = pv->pv_next;
736 		pv->pv_next = npv;
737 		pv = npv;
738 	}
739 	if (flags & PMAP_WIRED) {
740 		PV_WIRE(pv);
741 		pm->pm_stats.wired_count++;
742 	}
743 
744 	splx(s);
745 
746 	return 0;
747 }
748 
749 static void
750 pmap_remove_pv(struct pmap *pm, vaddr_t va, paddr_t pa)
751 {
752 	struct pv_entry *pv, *npv;
753 
754 	/*
755 	 * Remove from the PV table.
756 	 */
757 	pv = pa_to_pv(pa);
758 	if (!pv)
759 		return;
760 
761 	/*
762 	 * If it is the first entry on the list, it is actually
763 	 * in the header and we must copy the following entry up
764 	 * to the header.  Otherwise we must search the list for
765 	 * the entry.  In either case we free the now unused entry.
766 	 */
767 	if (pm == pv->pv_pm && PV_CMPVA(va, pv)) {
768 		if (PV_ISWIRED(pv))
769 			pm->pm_stats.wired_count--;
770 		if ((npv = pv->pv_next)) {
771 			*pv = *npv;
772 			pool_put(&pv_pool, npv);
773 		} else
774 			pv->pv_pm = NULL;
775 	} else {
776 		for (; (npv = pv->pv_next) != NULL; pv = npv)
777 			if (pm == npv->pv_pm && PV_CMPVA(va, npv))
778 				break;
779 		if (npv) {
780 			pv->pv_next = npv->pv_next;
781 			if (PV_ISWIRED(npv)) {
782 				pm->pm_stats.wired_count--;
783 			}
784 			pool_put(&pv_pool, npv);
785 		}
786 	}
787 }
788 
789 /*
790  * Insert physical page at pa into the given pmap at virtual address va.
791  */
792 int
793 pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
794 {
795 	u_int tte;
796 	bool managed;
797 	int s;
798 
799 	/*
800 	 * Have to remove any existing mapping first.
801 	 */
802 	pmap_remove(pm, va, va + PAGE_SIZE);
803 
804 	if (flags & PMAP_WIRED)
805 		flags |= prot;
806 
807 	managed = uvm_pageismanaged(pa);
808 
809 	/*
810 	 * Generate TTE.
811 	 */
812 	tte = TTE_PA(pa);
813 	/* XXXX -- need to support multiple page sizes. */
814 	tte |= TTE_SZ_16K;
815 
816 	KASSERT((flags & (PMAP_NOCACHE | PME_WRITETHROUG)) !=
817 	    (PMAP_NOCACHE | PME_WRITETHROUG));
818 
819 	if (flags & PMAP_NOCACHE) {
820 		/* Must be I/O mapping */
821 		tte |= TTE_I | TTE_G;
822 	}
823 #ifdef PPC_4XX_NOCACHE
824 	tte |= TTE_I;
825 #else
826 	else if (flags & PME_WRITETHROUG) {
827 		/* Uncached and writethrough are not compatible */
828 		tte |= TTE_W;
829 	}
830 #endif
831 
832 	if (pm == pmap_kernel())
833 		tte |= TTE_ZONE(ZONE_PRIV);
834 	else
835 		tte |= TTE_ZONE(ZONE_USER);
836 
837 	if (flags & VM_PROT_WRITE)
838 		tte |= TTE_WR;
839 
840 	if (flags & VM_PROT_EXECUTE)
841 		tte |= TTE_EX;
842 
843 	/*
844 	 * Now record mapping for later back-translation.
845 	 */
846 	if (pmap_initialized && managed) {
847 		char *attr;
848 
849 		if (pmap_enter_pv(pm, va, pa, flags)) {
850 			/* Could not enter pv on a managed page */
851 			return ENOMEM;
852 		}
853 
854 		/* Now set attributes. */
855 		attr = pa_to_attr(pa);
856 		KASSERT(attr);
857 		if (flags & VM_PROT_ALL)
858 			*attr |= PMAP_ATTR_REF;
859 		if (flags & VM_PROT_WRITE)
860 			*attr |= PMAP_ATTR_CHG;
861 	}
862 
863 	s = splvm();
864 
865 	/* Insert page into page table. */
866 	if (__predict_false(pte_enter(pm, va, tte))) {
867 		if (__predict_false((flags & PMAP_CANFAIL) == 0))
868 			panic("%s: pte_enter", __func__);
869 		splx(s);
870 		return ENOMEM;
871 	}
872 
873 	/* If this is a real fault, enter it in the tlb */
874 	if (tte && ((flags & PMAP_WIRED) == 0)) {
875 		int s2 = splhigh();
876 		ppc4xx_tlb_enter(pm->pm_ctx, va, tte);
877 		splx(s2);
878 	}
879 
880 	splx(s);
881 
882 	/* Flush the real memory from the instruction cache. */
883 	if ((prot & VM_PROT_EXECUTE) && (tte & TTE_I) == 0)
884 		__syncicache((void *)pa, PAGE_SIZE);
885 
886 	return 0;
887 }
888 
889 void
890 pmap_unwire(struct pmap *pm, vaddr_t va)
891 {
892 	struct pv_entry *pv;
893 	paddr_t pa;
894 	int s;
895 
896 	if (!pmap_extract(pm, va, &pa))
897 		return;
898 
899 	pv = pa_to_pv(pa);
900 	if (!pv)
901 		return;
902 
903 	s = splvm();
904 
905 	while (pv != NULL) {
906 		if (pm == pv->pv_pm && PV_CMPVA(va, pv)) {
907 			if (PV_ISWIRED(pv)) {
908 				PV_UNWIRE(pv);
909 				pm->pm_stats.wired_count--;
910 			}
911 			break;
912 		}
913 		pv = pv->pv_next;
914 	}
915 
916 	splx(s);
917 }
918 
919 void
920 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
921 {
922 	struct pmap *pm = pmap_kernel();
923 	u_int tte;
924 	int s;
925 
926 	/*
927 	 * Generate TTE.
928 	 *
929 	 * XXXX
930 	 *
931 	 * Since the kernel does not handle execution privileges properly,
932 	 * we will handle read and execute permissions together.
933 	 */
934 	tte = 0;
935 	if (prot & VM_PROT_ALL) {
936 		tte = TTE_PA(pa) | TTE_EX | TTE_ZONE(ZONE_PRIV);
937 		/* XXXX -- need to support multiple page sizes. */
938 		tte |= TTE_SZ_16K;
939 
940 		KASSERT((flags & (PMAP_NOCACHE | PME_WRITETHROUG)) !=
941 		    (PMAP_NOCACHE | PME_WRITETHROUG));
942 
943 		if (flags & PMAP_NOCACHE)
944 			/* Must be I/O mapping */
945 			tte |= TTE_I | TTE_G;
946 #ifdef PPC_4XX_NOCACHE
947 		tte |= TTE_I;
948 #else
949 		else if (prot & PME_WRITETHROUG) {
950 			/* Uncached and writethrough are not compatible */
951 			tte |= TTE_W;
952 		}
953 #endif
954 		if (prot & VM_PROT_WRITE)
955 			tte |= TTE_WR;
956 	}
957 
958 	s = splvm();
959 
960 	/* Insert page into page table. */
961 	if (__predict_false(pte_enter(pm, va, tte)))
962 		panic("%s: pte_enter", __func__);
963 
964 	splx(s);
965 }
966 
967 void
968 pmap_kremove(vaddr_t va, vsize_t len)
969 {
970 
971 	while (len > 0) {
972 		(void)pte_enter(pmap_kernel(), va, 0);	/* never fail */
973 		va += PAGE_SIZE;
974 		len -= PAGE_SIZE;
975 	}
976 }
977 
978 /*
979  * Remove the given range of mapping entries.
980  */
981 void
982 pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
983 {
984 	paddr_t pa;
985 	volatile u_int *ptp;
986 	int s;
987 
988 	s = splvm();
989 
990 	while (va < endva) {
991 		if ((ptp = pte_find(pm, va)) && (pa = *ptp)) {
992 			pa = TTE_PA(pa);
993 			pmap_remove_pv(pm, va, pa);
994 			*ptp = 0;
995 			ppc4xx_tlb_flush(va, pm->pm_ctx);
996 			pm->pm_stats.resident_count--;
997 		}
998 		va += PAGE_SIZE;
999 	}
1000 
1001 	splx(s);
1002 }
1003 
1004 /*
1005  * Get the physical page address for the given pmap/virtual address.
1006  */
1007 bool
1008 pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap)
1009 {
1010 	int seg = STIDX(va), ptn = PTIDX(va);
1011 	u_int pa = 0;
1012 	int s;
1013 
1014 	s = splvm();
1015 
1016 	if (pm->pm_ptbl[seg] && (pa = pm->pm_ptbl[seg][ptn]) && pap)
1017 		*pap = TTE_PA(pa) | (va & PGOFSET);
1018 
1019 	splx(s);
1020 
1021 	return pa != 0;
1022 }
1023 
1024 /*
1025  * Lower the protection on the specified range of this pmap.
1026  *
1027  * There are only two cases: either the protection is going to 0,
1028  * or it is going to read-only.
1029  */
1030 void
1031 pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1032 {
1033 	volatile u_int *ptp;
1034 	int s, bic;
1035 
1036 	if ((prot & VM_PROT_READ) == 0) {
1037 		pmap_remove(pm, sva, eva);
1038 		return;
1039 	}
1040 	bic = 0;
1041 	if ((prot & VM_PROT_WRITE) == 0)
1042 		bic |= TTE_WR;
1043 	if ((prot & VM_PROT_EXECUTE) == 0)
1044 		bic |= TTE_EX;
1045 	if (bic == 0)
1046 		return;
1047 
1048 	s = splvm();
1049 
1050 	while (sva < eva) {
1051 		if ((ptp = pte_find(pm, sva)) != NULL) {
1052 			*ptp &= ~bic;
1053 			ppc4xx_tlb_flush(sva, pm->pm_ctx);
1054 		}
1055 		sva += PAGE_SIZE;
1056 	}
1057 
1058 	splx(s);
1059 }
1060 
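/*
 * Test, and optionally clear, the cached referenced/modified attribute
 * bits for a page.  When clearing, the page protection is also lowered
 * so that a subsequent access can set the bits again.
 */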
1061 bool
1062 pmap_check_attr(struct vm_page *pg, u_int mask, int clear)
1063 {
1064 	paddr_t pa;
1065 	char *attr;
1066 	int s, rv;
1067 
1068 	/*
1069 	 * First modify bits in cache.
1070 	 */
1071 	pa = VM_PAGE_TO_PHYS(pg);
1072 	attr = pa_to_attr(pa);
1073 	if (attr == NULL)
1074 		return false;
1075 
1076 	s = splvm();
1077 
1078 	rv = (*attr & mask) != 0;
1079 	if (clear) {
1080 		*attr &= ~mask;
1081 		pmap_page_protect(pg,
1082 		    mask == PMAP_ATTR_CHG ? VM_PROT_READ : 0);
1083 	}
1084 
1085 	splx(s);
1086 
1087 	return rv;
1088 }
1089 
1090 
1091 /*
1092  * Lower the protection on the specified physical page.
1093  *
1094  * There are only two cases: either the protection is going to 0,
1095  * or it is going to read-only.
1096  */
1097 void
1098 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
1099 {
1100 	struct pv_entry *pvh, *pv, *npv;
1101 	struct pmap *pm;
1102 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
1103 	vaddr_t va;
1104 
1105 	pvh = pa_to_pv(pa);
1106 	if (pvh == NULL)
1107 		return;
1108 
1109 	/* Handle extra pvs which may be deleted in the operation */
1110 	for (pv = pvh->pv_next; pv; pv = npv) {
1111 		npv = pv->pv_next;
1112 
1113 		pm = pv->pv_pm;
1114 		va = PV_VA(pv);
1115 		pmap_protect(pm, va, va + PAGE_SIZE, prot);
1116 	}
1117 
1118 	/* Now check the head pv */
1119 	if (pvh->pv_pm) {
1120 		pv = pvh;
1121 		pm = pv->pv_pm;
1122 		va = PV_VA(pv);
1123 		pmap_protect(pm, va, va + PAGE_SIZE, prot);
1124 	}
1125 }
1126 
1127 /*
1128  * Activate the address space for the specified process.  If the process
1129  * is the current process, load the new MMU context.
1130  */
1131 void
1132 pmap_activate(struct lwp *l)
1133 {
1134 #if 0
1135 	struct pcb *pcb = lwp_getpcb(l);
1136 	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
1137 
1138 	/*
1139 	 * XXX Normally performed in cpu_lwp_fork().
1140 	 */
1141 	printf("pmap_activate(%p), pmap=%p\n",l,pmap);
1142 	pcb->pcb_pm = pmap;
1143 #endif
1144 }
1145 
1146 /*
1147  * Deactivate the specified process's address space.
1148  */
1149 void
1150 pmap_deactivate(struct lwp *l)
1151 {
1152 }
1153 
1154 /*
1155  * Synchronize caches corresponding to [addr, addr+len) in p.
1156  */
1157 void
1158 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
1159 {
1160 	struct pmap *pm = p->p_vmspace->vm_map.pmap;
1161 
1162 	if (__predict_true(p == curproc)) {
1163 		int msr, ctx, pid;
1164 
1165 		/*
1166 		 * Take it easy! TLB miss handler takes care of us.
1167 		 */
1168 
1169 		/*
1170 	 	 * Need to turn off IMMU and switch to user context.
1171 		 * (icbi uses DMMU).
1172 		 */
1173 
1174 		if (!(ctx = pm->pm_ctx)) {
1175 			/* No context -- assign it one */
1176 			ctx_alloc(pm);
1177 			ctx = pm->pm_ctx;
1178 		}
1179 
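		/*
		 * MSR[IR] is 0x20 and MSR[DR] is 0x10: clear IR, force DR
		 * on, then run dcbst/icbi over the range in the user context.
		 */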
1180 		__asm volatile (
1181 			"mfmsr	%[msr];"
1182 			"li	%[pid],0x20;"		/* Turn off IMMU */
1183 			"andc	%[pid],%[msr],%[pid];"
1184 			"ori	%[pid],%[pid],0x10;" /* Turn on DMMU for sure */
1185 			"mtmsr	%[pid];"
1186 			"isync;"
1187 			MFPID(%[pid])
1188 			MTPID(%[ctx])
1189 			"isync;"
1190 		"1:"
1191 			"dcbst	0,%[va];"
1192 			"icbi	0,%[va];"
1193 			"add	%[va],%[va],%[size];"
1194 			"sub.	%[len],%[len],%[size];"
1195 			"bge	1b;"
1196 			"sync;"
1197 			MTPID(%[pid])
1198 			"mtmsr	%[msr];"
1199 			"isync;"
1200 			: [msr] "=&r" (msr), [pid] "=&r" (pid)
1201 			: [ctx] "r" (ctx), [va] "r" (va), [len] "r" (len),
1202 			  [size] "r" (CACHELINESIZE));
1203 	} else {
1204 		paddr_t pa;
1205 		vaddr_t tva, eva;
1206 		int tlen;
1207 
1208 		/*
1209 		 * For p != curproc, we cannot rely upon TLB miss handler in
1210 		 * user context. Therefore, extract pa and operate against it.
1211 		 *
1212 		 * Note that va below VM_MIN_KERNEL_ADDRESS is reserved for
1213 		 * direct mapping.
1214 		 */
1215 
1216 		for (tva = va; len > 0; tva = eva, len -= tlen) {
1217 			eva = uimin(tva + len, trunc_page(tva + PAGE_SIZE));
1218 			tlen = eva - tva;
1219 			if (!pmap_extract(pm, tva, &pa)) {
1220 				/* XXX should be already unmapped */
1221 				continue;
1222 			}
1223 			__syncicache((void *)pa, tlen);
1224 		}
1225 	}
1226 }
1227 
1228 static inline void
1229 tlb_invalidate_entry(int i)
1230 {
1231 #ifdef PMAP_TLBDEBUG
1232 	/*
1233 	 * Clear only TLBHI[V] bit so that we can track invalidated entry.
1234 	 */
1235 	register_t msr, pid, hi;
1236 
1237 	KASSERT(mfspr(SPR_PID) == KERNEL_PID);
1238 
1239 	__asm volatile (
1240 		"mfmsr	%[msr];"
1241 		"li	%[pid],0;"
1242 		"mtmsr	%[pid];"
1243 		MFPID(%[pid])
1244 		"tlbre	%[hi],%[i],0;"
1245 		"andc	%[hi],%[hi],%[valid];"
1246 		"tlbwe	%[hi],%[i],0;"
1247 		MTPID(%[pid])
1248 		"mtmsr	%[msr];"
1249 		"isync;"
1250 		: [msr] "=&r" (msr), [pid] "=&r" (pid), [hi] "=&r" (hi)
1251 		: [i] "r" (i), [valid] "r" (TLB_VALID));
1252 #else
1253 	/*
1254 	 * Just clear entire TLBHI register.
1255 	 */
1256 	__asm volatile (
1257 		"tlbwe	%0,%1,0;"
1258 		"isync;"
1259 		: : "r" (0), "r" (i));
1260 #endif
1261 
1262 	tlb_info[i].ti_ctx = 0;
1263 	tlb_info[i].ti_flags = 0;
1264 }
1265 
1266 /* This has to be done in real mode !!! */
1267 void
1268 ppc4xx_tlb_flush(vaddr_t va, int pid)
1269 {
1270 	u_long msr, i, found;
1271 
1272 	/* If there's no context then it can't be mapped. */
1273 	if (!pid)
1274 		return;
1275 
1276 	__asm volatile (
1277 		MFPID(%[found])		/* Save PID */
1278 		"mfmsr	%[msr];"	/* Save MSR */
1279 		"li	%[i],0;"	/* Now clear MSR */
1280 		"mtmsr	%[i];"
1281 		"isync;"
1282 		MTPID(%[pid])		/* Set PID */
1283 		"isync;"
1284 		"tlbsx.	%[i],0,%[va];"	/* Search TLB */
1285 		"isync;"
1286 		MTPID(%[found])		/* Restore PID */
1287 		"mtmsr	%[msr];"	/* Restore MSR */
1288 		"isync;"
1289 		"li	%[found],1;"
1290 		"beq	1f;"
1291 		"li	%[found],0;"
1292 	"1:"
1293 		: [i] "=&r" (i), [found] "=&r" (found), [msr] "=&r" (msr)
1294 		: [va] "r" (va), [pid] "r" (pid));
1295 
1296 	if (found && !TLB_LOCKED(i)) {
1297 		/* Now flush translation */
1298 		tlb_invalidate_entry(i);
1299 		tlbnext = i;
1300 		/* Successful flushes */
1301 		tlbflush_ev.ev_count++;
1302 	}
1303 }
1304 
1305 void
1306 ppc4xx_tlb_flush_all(void)
1307 {
1308 	u_long i;
1309 
1310 	for (i = 0; i < NTLB; i++)
1311 		if (!TLB_LOCKED(i))
1312 			tlb_invalidate_entry(i);
1313 
1314 	__asm volatile ("isync");
1315 }
1316 
1317 /* Find a TLB entry to evict. */
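/*
 * Second-chance FIFO: a referenced entry has its TLBF_REF bit cleared
 * and is skipped this pass, and the entry covering the current kernel
 * stack page is re-marked referenced so it is not chosen as the victim.
 */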
1318 static int
1319 ppc4xx_tlb_find_victim(void)
1320 {
1321 	int flags;
1322 
1323 	for (;;) {
1324 		if (++tlbnext >= NTLB)
1325 			tlbnext = tlb_nreserved;
1326 		flags = tlb_info[tlbnext].ti_flags;
1327 		if (!(flags & TLBF_USED) ||
1328 		    (flags & (TLBF_LOCKED | TLBF_REF)) == 0) {
1329 			u_long va, stack = (u_long)&va;
1330 
1331 			if (!((tlb_info[tlbnext].ti_va ^ stack) &
1332 				(~PGOFSET)) &&
1333 			    (tlb_info[tlbnext].ti_ctx == KERNEL_PID) &&
1334 			    (flags & TLBF_USED)) {
1335 				/* Kernel stack page */
1336 				flags |= TLBF_REF;
1337 				tlb_info[tlbnext].ti_flags = flags;
1338 			} else {
1339 				/* Found it! */
1340 				return tlbnext;
1341 			}
1342 		} else
1343 			tlb_info[tlbnext].ti_flags = (flags & ~TLBF_REF);
1344 	}
1345 }
1346 
1347 void
1348 ppc4xx_tlb_enter(int ctx, vaddr_t va, u_int pte)
1349 {
1350 	u_long hi, lo, i;
1351 	paddr_t pa;
1352 	int msr, pid, sz;
1353 
1354 	tlbenter_ev.ev_count++;
1355 
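	/*
	 * Split the software PTE into the TLBHI word (EPN, size, valid)
	 * and the TLBLO word (RPN plus access/attribute bits).
	 */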
1356 	sz = (pte & TTE_SZ_MASK) >> TTE_SZ_SHIFT;
1357 	pa = (pte & TTE_RPN_MASK(sz));
1358 	hi = (va & TLB_EPN_MASK) | (sz << TLB_SIZE_SHFT) | TLB_VALID;
1359 	lo = (pte & ~TLB_RPN_MASK) | pa;
1360 	lo |= ppc4xx_tlbflags(va, pa);
1361 
1362 	i = ppc4xx_tlb_find_victim();
1363 
1364 	KASSERTMSG(i >= tlb_nreserved && i < NTLB,
1365 	    "invalid entry %ld", i);
1366 
1367 	tlb_info[i].ti_va = (va & TLB_EPN_MASK);
1368 	tlb_info[i].ti_ctx = ctx;
1369 	tlb_info[i].ti_flags = TLBF_USED | TLBF_REF;
1370 
1371 	__asm volatile (
1372 		"mfmsr	%[msr];"		/* Save MSR */
1373 		"li	%[pid],0;"
1374 		"mtmsr	%[pid];"		/* Clear MSR */
1375 		"isync;"
1376 		"tlbwe	%[pid],%[i],0;"		/* Invalidate old entry. */
1377 		MFPID(%[pid])			/* Save old PID */
1378 		MTPID(%[ctx])			/* Load translation ctx */
1379 		"isync;"
1380 		"tlbwe	%[lo],%[i],1;"		/* Set TLB */
1381 		"tlbwe	%[hi],%[i],0;"
1382 		"isync;"
1383 		MTPID(%[pid])			/* Restore PID */
1384 		"mtmsr	%[msr];"		/* and MSR */
1385 		"isync;"
1386 		: [msr] "=&r" (msr), [pid] "=&r" (pid)
1387 		: [ctx] "r" (ctx), [i] "r" (i), [lo] "r" (lo), [hi] "r" (hi));
1388 }
1389 
1390 void
1391 ppc4xx_tlb_init(void)
1392 {
1393 	int i;
1394 
1395 	/* Mark reserved TLB entries */
1396 	for (i = 0; i < tlb_nreserved; i++) {
1397 		tlb_info[i].ti_flags = TLBF_LOCKED | TLBF_USED;
1398 		tlb_info[i].ti_ctx = KERNEL_PID;
1399 	}
1400 
1401 	/* Setup security zones */
1402 	/* Z0 - accessible by kernel only if TLB entry permissions allow
1403 	 * Z1,Z2 - access is controlled by TLB entry permissions
1404 	 * Z3 - full access regardless of TLB entry permissions
1405 	 */
1406 
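	/*
	 * 0x1b000000: the upper byte 0x1b is 0b00011011, i.e. the 2-bit
	 * fields Z0=0b00, Z1=0b01, Z2=0b10, Z3=0b11; zones 4-15 stay 0.
	 */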
1407 	__asm volatile (
1408 		"mtspr	%0,%1;"
1409 		"isync;"
1410 		: : "K" (SPR_ZPR), "r" (0x1b000000));
1411 }
1412 
1413 /*
1414  * ppc4xx_tlb_size_mask:
1415  *
1416  * 	Roundup size to supported page size, return TLBHI mask and real size.
1417  */
1418 static int
1419 ppc4xx_tlb_size_mask(size_t size, int *mask, int *rsiz)
1420 {
1421 	int i;
1422 
1423 	for (i = 0; i < __arraycount(tlbsize); i++)
1424 		if (size <= tlbsize[i]) {
1425 			*mask = (i << TLB_SIZE_SHFT);
1426 			*rsiz = tlbsize[i];
1427 			return 0;
1428 		}
1429 	return EINVAL;
1430 }
1431 
1432 /*
1433  * ppc4xx_tlb_mapiodev:
1434  *
1435  * 	Lookup virtual address of mapping previously entered via
1436  * 	ppc4xx_tlb_reserve. Search TLB directly so that we don't
1437  * 	need to waste extra storage for reserved mappings. Note
1438  * 	that reading TLBHI also sets PID, but all reserved mappings
1439  * 	use KERNEL_PID, so the side effect is nil.
1440  */
1441 void *
1442 ppc4xx_tlb_mapiodev(paddr_t base, psize_t len)
1443 {
1444 	paddr_t pa;
1445 	vaddr_t va;
1446 	u_int lo, hi, sz;
1447 	int i;
1448 
1449 	/* tlb_nreserved is only allowed to grow, so this is safe. */
1450 	for (i = 0; i < tlb_nreserved; i++) {
1451 		__asm volatile (
1452 			"tlbre	%[lo],%[i],1;" 	/* TLBLO */
1453 			"tlbre	%[hi],%[i],0;" 	/* TLBHI */
1454 			: [lo] "=&r" (lo), [hi] "=&r" (hi)
1455 			: [i] "r" (i));
1456 
1457 		KASSERT(hi & TLB_VALID);
1458 		KASSERT(mfspr(SPR_PID) == KERNEL_PID);
1459 
1460 		pa = (lo & TLB_RPN_MASK);
1461 		if (base < pa)
1462 			continue;
1463 
1464 		sz = tlbsize[(hi & TLB_SIZE_MASK) >> TLB_SIZE_SHFT];
1465 		if (base + len > pa + sz)
1466 			continue;
1467 
1468 		va = (hi & TLB_EPN_MASK) + (base & (sz - 1)); 	/* sz = 2^n */
1469 		return (void *)va;
1470 	}
1471 
1472 	return NULL;
1473 }
1474 
1475 /*
1476  * ppc4xx_tlb_reserve:
1477  *
1478  * 	Map physical range to kernel virtual chunk via reserved TLB entry.
1479  */
1480 void
1481 ppc4xx_tlb_reserve(paddr_t pa, vaddr_t va, size_t size, int flags)
1482 {
1483 	u_int lo, hi;
1484 	int szmask, rsize;
1485 
1486 	/* Called before pmap_bootstrap(), va outside kernel space. */
1487 	KASSERT(va < VM_MIN_KERNEL_ADDRESS || va >= VM_MAX_KERNEL_ADDRESS);
1488 	KASSERT(!pmap_bootstrap_done);
1489 	KASSERT(tlb_nreserved < NTLB);
1490 
1491 	/* Resolve size. */
1492 	if (ppc4xx_tlb_size_mask(size, &szmask, &rsize) != 0)
1493 		panic("ppc4xx_tlb_reserve: entry %d, %zuB too large",
1494 		    tlb_nreserved, size);
1495 
1496 	/* Real size will be power of two >= 1024, so this is OK. */
1497 	pa &= ~(rsize - 1); 	/* RPN */
1498 	va &= ~(rsize - 1); 	/* EPN */
1499 
1500 	lo = pa | TLB_WR | flags;
1501 	hi = va | TLB_VALID | szmask;
1502 
1503 #ifdef PPC_4XX_NOCACHE
1504 	lo |= TLB_I;
1505 #endif
1506 
1507 	__asm volatile (
1508 		"tlbwe	%[lo],%[i],1;"	/* write TLBLO */
1509 		"tlbwe	%[hi],%[i],0;"	/* write TLBHI */
1510 		"isync;"
1511 		: : [i] "r" (tlb_nreserved), [lo] "r" (lo), [hi] "r" (hi));
1512 
1513 	tlb_nreserved++;
1514 }
1515 
1516 /*
1517  * We should pass the ctx in from trap code.
1518  */
1519 int
1520 pmap_tlbmiss(vaddr_t va, int ctx)
1521 {
1522 	volatile u_int *pte;
1523 	u_long tte;
1524 
1525 	tlbmiss_ev.ev_count++;
1526 
1527 	/*
1528 	 * We will reserve 0 up to VM_MIN_KERNEL_ADDRESS for va == pa mappings.
1529 	 * Physical RAM is expected to live in this range; care must be taken
1530 	 * not to clobber 0 up to ${physmem} with device mappings in machdep
1531 	 * code.
1532 	 */
1533 	if (ctx != KERNEL_PID ||
1534 	    (va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS)) {
1535 		pte = pte_find((struct pmap *)__UNVOLATILE(ctxbusy[ctx]), va);
1536 		if (pte == NULL) {
1537 			/*
1538 			 * Map unmanaged addresses directly for
1539 			 * kernel access
1540 			 */
1541 			return 1;
1542 		}
1543 		tte = *pte;
1544 		if (tte == 0)
1545 			return 1;
1546 	} else {
1547 		/* Create a 16MB writable mapping. */
1548 		tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_WR;
1549 #ifdef PPC_4XX_NOCACHE
1550 		tte |= TTE_I;
1551 #endif
1552 	}
1553 	ppc4xx_tlb_enter(ctx, va, tte);
1554 
1555 	return 0;
1556 }
1557 
1558 /*
1559  * Flush all the entries matching a context from the TLB.
1560  */
1561 static void
1562 ctx_flush(int cnum)
1563 {
1564 	int i;
1565 
1566 	/* We gotta steal this context */
1567 	for (i = tlb_nreserved; i < NTLB; i++) {
1568 		if (tlb_info[i].ti_ctx == cnum) {
1569 			/* Can't steal ctx if it has locked/reserved entry. */
1570 			KASSERTMSG(!TLB_LOCKED(i) && i >= tlb_nreserved,
1571 			    "locked/reserved entry %d for ctx %d",
1572 			    i, cnum);
1573 			/*
1574 			 * Invalidate particular TLB entry regardless of
1575 			 * locked status
1576 			 */
1577 			tlb_invalidate_entry(i);
1578 		}
1579 	}
1580 }
1581 
1582 /*
1583  * Allocate a context.  If necessary, steal one from someone else.
1584  *
1585  * The new context is flushed from the TLB before returning.
1586  */
1587 int
1588 ctx_alloc(struct pmap *pm)
1589 {
1590 	static int next = MINCTX;
1591 	int cnum, s;
1592 
1593 	KASSERT(pm != pmap_kernel());
1594 
1595 	s = splvm();
1596 
1597 	/* Find a likely context. */
1598 	cnum = next;
1599 	do {
1600 		if (++cnum >= NUMCTX)
1601 			cnum = MINCTX;
1602 	} while (ctxbusy[cnum] != NULL && cnum != next);
1603 
1604 	/* Now clean it out */
1605 	if (cnum < MINCTX)
1606 		cnum = MINCTX; /* Never steal ctx 0 or 1 */
1607 	ctx_flush(cnum);
1608 
1609 	if (ctxbusy[cnum]) {
1610 #ifdef DEBUG
1611 		/* We should identify this pmap and clear it */
1612 		printf("Warning: stealing context %d\n", cnum);
1613 #endif
1614 		ctxbusy[cnum]->pm_ctx = 0;
1615 	}
1616 	ctxbusy[cnum] = pm;
1617 	next = cnum;
1618 
1619 	splx(s);
1620 
1621 	pm->pm_ctx = cnum;
1622 
1623 	return cnum;
1624 }
1625 
1626 /*
1627  * Give away a context.
1628  */
1629 void
1630 ctx_free(struct pmap *pm)
1631 {
1632 	int oldctx;
1633 
1634 	oldctx = pm->pm_ctx;
1635 
1636 	if (oldctx == 0)
1637 		panic("ctx_free: freeing kernel context");
1638 
1639 	KASSERTMSG(ctxbusy[oldctx] == pm,
1640 	    "ctxbusy[%d] = %p, pm->pm_ctx = %p",
1641 	    oldctx, ctxbusy[oldctx], pm);
1642 
1643 	/* We should verify it has not been stolen and reallocated... */
1644 	ctxbusy[oldctx] = NULL;
1645 	ctx_flush(oldctx);
1646 }
1647 
1648 #ifdef DEBUG
1649 /*
1650  * Test ref/modify handling.
1651  */
1652 void pmap_testout(void);
1653 void
1654 pmap_testout(void)
1655 {
1656 	struct vm_page *pg;
1657 	vaddr_t va;
1658 	paddr_t pa;
1659 	volatile int *loc;
1660 	int ref, mod, val = 0;
1661 
1662 	/* Allocate a page */
1663 	va = (vaddr_t)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
1664 	    UVM_KMF_WIRED | UVM_KMF_ZERO);
1665 	loc = (int *)va;
1666 
1667 	pmap_extract(pmap_kernel(), va, &pa);
1668 	pg = PHYS_TO_VM_PAGE(pa);
1669 	pmap_unwire(pmap_kernel(), va);
1670 
1671 	pmap_kremove(va, PAGE_SIZE);
1672 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
1673 	pmap_update(pmap_kernel());
1674 
1675 	/* Now clear reference and modify */
1676 	ref = pmap_clear_reference(pg);
1677 	mod = pmap_clear_modify(pg);
1678 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1679 	    (void *)(u_long)va, (long)pa, ref, mod);
1680 
1681 	/* Check it's properly cleared */
1682 	ref = pmap_is_referenced(pg);
1683 	mod = pmap_is_modified(pg);
1684 	printf("Checking cleared page: ref %d, mod %d\n", ref, mod);
1685 
1686 	/* Reference page */
1687 	val = *loc;
1688 
1689 	ref = pmap_is_referenced(pg);
1690 	mod = pmap_is_modified(pg);
1691 	printf("Referenced page: ref %d, mod %d val %x\n", ref, mod, val);
1692 
1693 	/* Now clear reference and modify */
1694 	ref = pmap_clear_reference(pg);
1695 	mod = pmap_clear_modify(pg);
1696 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1697 	    (void *)(u_long)va, (long)pa, ref, mod);
1698 
1699 	/* Modify page */
1700 	*loc = 1;
1701 
1702 	ref = pmap_is_referenced(pg);
1703 	mod = pmap_is_modified(pg);
1704 	printf("Modified page: ref %d, mod %d\n", ref, mod);
1705 
1706 	/* Now clear reference and modify */
1707 	ref = pmap_clear_reference(pg);
1708 	mod = pmap_clear_modify(pg);
1709 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1710 	    (void *)(u_long)va, (long)pa, ref, mod);
1711 
1712 	/* Check it's properly cleared */
1713 	ref = pmap_is_referenced(pg);
1714 	mod = pmap_is_modified(pg);
1715 	printf("Checking cleared page: ref %d, mod %d\n", ref, mod);
1716 
1717 	/* Modify page */
1718 	*loc = 1;
1719 
1720 	ref = pmap_is_referenced(pg);
1721 	mod = pmap_is_modified(pg);
1722 	printf("Modified page: ref %d, mod %d\n", ref, mod);
1723 
1724 	/* Check pmap_protect() */
1725 	pmap_protect(pmap_kernel(), va, va + PAGE_SIZE, VM_PROT_READ);
1726 	pmap_update(pmap_kernel());
1727 	ref = pmap_is_referenced(pg);
1728 	mod = pmap_is_modified(pg);
1729 	printf("pmap_protect(VM_PROT_READ): ref %d, mod %d\n", ref, mod);
1730 
1731 	/* Now clear reference and modify */
1732 	ref = pmap_clear_reference(pg);
1733 	mod = pmap_clear_modify(pg);
1734 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1735 	    (void *)(u_long)va, (long)pa, ref, mod);
1736 
1737 	/* Reference page */
1738 	val = *loc;
1739 
1740 	ref = pmap_is_referenced(pg);
1741 	mod = pmap_is_modified(pg);
1742 	printf("Referenced page: ref %d, mod %d val %x\n", ref, mod, val);
1743 
1744 	/* Now clear reference and modify */
1745 	ref = pmap_clear_reference(pg);
1746 	mod = pmap_clear_modify(pg);
1747 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1748 	    (void *)(u_long)va, (long)pa, ref, mod);
1749 
1750 	/* Modify page */
1751 #if 0
1752 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
1753 	pmap_update(pmap_kernel());
1754 #endif
1755 	*loc = 1;
1756 
1757 	ref = pmap_is_referenced(pg);
1758 	mod = pmap_is_modified(pg);
1759 	printf("Modified page: ref %d, mod %d\n", ref, mod);
1760 
1761 	/* Check pmap_protect() */
1762 	pmap_protect(pmap_kernel(), va, va + PAGE_SIZE, VM_PROT_NONE);
1763 	pmap_update(pmap_kernel());
1764 	ref = pmap_is_referenced(pg);
1765 	mod = pmap_is_modified(pg);
1766 	printf("pmap_protect(): ref %d, mod %d\n", ref, mod);
1767 
1768 	/* Now clear reference and modify */
1769 	ref = pmap_clear_reference(pg);
1770 	mod = pmap_clear_modify(pg);
1771 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1772 	    (void *)(u_long)va, (long)pa, ref, mod);
1773 
1774 	/* Reference page */
1775 	val = *loc;
1776 
1777 	ref = pmap_is_referenced(pg);
1778 	mod = pmap_is_modified(pg);
1779 	printf("Referenced page: ref %d, mod %d val %x\n", ref, mod, val);
1780 
1781 	/* Now clear reference and modify */
1782 	ref = pmap_clear_reference(pg);
1783 	mod = pmap_clear_modify(pg);
1784 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1785 	    (void *)(u_long)va, (long)pa, ref, mod);
1786 
1787 	/* Modify page */
1788 #if 0
1789 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
1790 	pmap_update(pmap_kernel());
1791 #endif
1792 	*loc = 1;
1793 
1794 	ref = pmap_is_referenced(pg);
1795 	mod = pmap_is_modified(pg);
1796 	printf("Modified page: ref %d, mod %d\n", ref, mod);
1797 
1798 	/* Check pmap_page_protect() */
1799 	pmap_page_protect(pg, VM_PROT_READ);
1800 	ref = pmap_is_referenced(pg);
1801 	mod = pmap_is_modified(pg);
1802 	printf("pmap_page_protect(VM_PROT_READ): ref %d, mod %d\n", ref, mod);
1803 
1804 	/* Now clear reference and modify */
1805 	ref = pmap_clear_reference(pg);
1806 	mod = pmap_clear_modify(pg);
1807 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1808 	    (void *)(u_long)va, (long)pa, ref, mod);
1809 
1810 	/* Reference page */
1811 	val = *loc;
1812 
1813 	ref = pmap_is_referenced(pg);
1814 	mod = pmap_is_modified(pg);
1815 	printf("Referenced page: ref %d, mod %d val %x\n", ref, mod, val);
1816 
1817 	/* Now clear reference and modify */
1818 	ref = pmap_clear_reference(pg);
1819 	mod = pmap_clear_modify(pg);
1820 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1821 	    (void *)(u_long)va, (long)pa, ref, mod);
1822 
1823 	/* Modify page */
1824 #if 0
1825 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
1826 	pmap_update(pmap_kernel());
1827 #endif
1828 	*loc = 1;
1829 
1830 	ref = pmap_is_referenced(pg);
1831 	mod = pmap_is_modified(pg);
1832 	printf("Modified page: ref %d, mod %d\n", ref, mod);
1833 
1834 	/* Check pmap_page_protect() */
1835 	pmap_page_protect(pg, VM_PROT_NONE);
1836 	ref = pmap_is_referenced(pg);
1837 	mod = pmap_is_modified(pg);
1838 	printf("pmap_page_protect(): ref %d, mod %d\n", ref, mod);
1839 
1840 	/* Now clear reference and modify */
1841 	ref = pmap_clear_reference(pg);
1842 	mod = pmap_clear_modify(pg);
1843 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1844 	    (void *)(u_long)va, (long)pa, ref, mod);
1845 
1846 
1847 	/* Reference page */
1848 	val = *loc;
1849 
1850 	ref = pmap_is_referenced(pg);
1851 	mod = pmap_is_modified(pg);
1852 	printf("Referenced page: ref %d, mod %d val %x\n", ref, mod, val);
1853 
1854 	/* Now clear reference and modify */
1855 	ref = pmap_clear_reference(pg);
1856 	mod = pmap_clear_modify(pg);
1857 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1858 	    (void *)(u_long)va, (long)pa, ref, mod);
1859 
1860 	/* Modify page */
1861 #if 0
1862 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
1863 	pmap_update(pmap_kernel());
1864 #endif
1865 	*loc = 1;
1866 
1867 	ref = pmap_is_referenced(pg);
1868 	mod = pmap_is_modified(pg);
1869 	printf("Modified page: ref %d, mod %d\n", ref, mod);
1870 
1871 	/* Unmap page */
1872 	pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
1873 	pmap_update(pmap_kernel());
1874 	ref = pmap_is_referenced(pg);
1875 	mod = pmap_is_modified(pg);
1876 	printf("Unmapped page: ref %d, mod %d\n", ref, mod);
1877 
1878 	/* Now clear reference and modify */
1879 	ref = pmap_clear_reference(pg);
1880 	mod = pmap_clear_modify(pg);
1881 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1882 	    (void *)(u_long)va, (long)pa, ref, mod);
1883 
1884 	/* Check it's properly cleared */
1885 	ref = pmap_is_referenced(pg);
1886 	mod = pmap_is_modified(pg);
1887 	printf("Checking cleared page: ref %d, mod %d\n", ref, mod);
1888 
1889 	pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
1890 	pmap_kenter_pa(va, pa, VM_PROT_ALL, 0);
1891 	uvm_km_free(kernel_map, (vaddr_t)va, PAGE_SIZE, UVM_KMF_WIRED);
1892 }
1893 #endif
1894