xref: /netbsd/sys/arch/m68k/m68k/pmap_motorola.c (revision dabf5da9)
1 /*	$NetBSD: pmap_motorola.c,v 1.77 2022/07/31 17:11:41 chs Exp $        */
2 
3 /*-
4  * Copyright (c) 1999 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Copyright (c) 1991, 1993
34  *	The Regents of the University of California.  All rights reserved.
35  *
36  * This code is derived from software contributed to Berkeley by
37  * the Systems Programming Group of the University of Utah Computer
38  * Science Department.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 1. Redistributions of source code must retain the above copyright
44  *    notice, this list of conditions and the following disclaimer.
45  * 2. Redistributions in binary form must reproduce the above copyright
46  *    notice, this list of conditions and the following disclaimer in the
47  *    documentation and/or other materials provided with the distribution.
48  * 3. Neither the name of the University nor the names of its contributors
49  *    may be used to endorse or promote products derived from this software
50  *    without specific prior written permission.
51  *
52  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62  * SUCH DAMAGE.
63  *
64  *	@(#)pmap.c	8.6 (Berkeley) 5/27/94
65  */
66 
67 /*
68  * Motorola m68k-family physical map management code.
69  *
70  * Supports:
71  *	68020 with 68851 MMU
72  *	68030 with on-chip MMU
73  *	68040 with on-chip MMU
74  *	68060 with on-chip MMU
75  *
76  * Notes:
77  *	Don't even pay lip service to multiprocessor support.
78  *
79  *	We assume TLB entries don't have process tags (except for the
80  *	supervisor/user distinction) so we only invalidate TLB entries
81  *	when changing mappings for the current (or kernel) pmap.  This is
82  *	technically not true for the 68851 but we flush the TLB on every
83  *	context switch, so it effectively winds up that way.
84  *
85  *	Bitwise and/or operations are significantly faster than bitfield
86  *	references so we use them when accessing STE/PTEs in the pmap_pte_*
87  *	macros.  Note also that the two are not always equivalent; e.g.:
88  *		(*pte & PG_PROT) [4] != pte->pg_prot [1]
89  *	and a couple of routines that deal with protection and wiring take
90  *	some shortcuts that assume the and/or definitions.
91  */
92 
93 /*
94  *	Manages physical address maps.
95  *
96  *	In addition to hardware address maps, this
97  *	module is called upon to provide software-use-only
98  *	maps which may or may not be stored in the same
99  *	form as hardware maps.  These pseudo-maps are
100  *	used to store intermediate results from copy
101  *	operations to and from address spaces.
102  *
103  *	Since the information managed by this module is
104  *	also stored by the logical address mapping module,
105  *	this module may throw away valid virtual-to-physical
106  *	mappings at almost any time.  However, invalidations
107  *	of virtual-to-physical mappings must be done as
108  *	requested.
109  *
110  *	In order to cope with hardware architectures which
111  *	make virtual-to-physical map invalidations expensive,
112  *	this module may delay invalidation or protection-reduction
113  *	operations until such time as they are actually
114  *	necessary.  This module is given full information as
115  *	to which processors are currently using which maps,
116  *	and to when physical maps must be made correct.
117  */
118 
119 #include "opt_m68k_arch.h"
120 
121 #include <sys/cdefs.h>
122 __KERNEL_RCSID(0, "$NetBSD: pmap_motorola.c,v 1.77 2022/07/31 17:11:41 chs Exp $");
123 
124 #include <sys/param.h>
125 #include <sys/systm.h>
126 #include <sys/proc.h>
127 #include <sys/malloc.h>
128 #include <sys/pool.h>
129 #include <sys/cpu.h>
130 #include <sys/atomic.h>
131 
132 #include <machine/pte.h>
133 #include <machine/pcb.h>
134 
135 #include <uvm/uvm.h>
136 #include <uvm/uvm_physseg.h>
137 
138 #include <m68k/cacheops.h>
139 
140 #ifdef DEBUG
141 #define PDB_FOLLOW	0x0001
142 #define PDB_INIT	0x0002
143 #define PDB_ENTER	0x0004
144 #define PDB_REMOVE	0x0008
145 #define PDB_CREATE	0x0010
146 #define PDB_PTPAGE	0x0020
147 #define PDB_CACHE	0x0040
148 #define PDB_BITS	0x0080
149 #define PDB_COLLECT	0x0100
150 #define PDB_PROTECT	0x0200
151 #define PDB_SEGTAB	0x0400
152 #define PDB_MULTIMAP	0x0800
153 #define PDB_PARANOIA	0x2000
154 #define PDB_WIRING	0x4000
155 #define PDB_PVDUMP	0x8000
156 
157 int debugmap = 0;
158 int pmapdebug = PDB_PARANOIA;
159 
160 #define	PMAP_DPRINTF(l, x)	if (pmapdebug & (l)) printf x
161 #else /* ! DEBUG */
162 #define	PMAP_DPRINTF(l, x)	/* nothing */
163 #endif /* DEBUG */
164 
165 /*
166  * Get STEs and PTEs for user/kernel address space
167  */
168 #if defined(M68040) || defined(M68060)
169 #define	pmap_ste1(m, v)	\
170 	(&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
171 /* XXX assumes physically contiguous ST pages (if more than one) */
172 #define pmap_ste2(m, v) \
173 	(&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m, v) & SG4_ADDR1) \
174 			- (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)]))
175 #if defined(M68020) || defined(M68030)
176 #define	pmap_ste(m, v)	\
177 	(&((m)->pm_stab[(vaddr_t)(v) \
178 			>> (mmutype == MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)]))
179 #define pmap_ste_v(m, v) \
180 	(mmutype == MMU_68040 \
181 	 ? ((*pmap_ste1(m, v) & SG_V) && \
182 	    (*pmap_ste2(m, v) & SG_V)) \
183 	 : (*pmap_ste(m, v) & SG_V))
184 #else
185 #define	pmap_ste(m, v)	\
186 	(&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
187 #define pmap_ste_v(m, v) \
188 	((*pmap_ste1(m, v) & SG_V) && (*pmap_ste2(m, v) & SG_V))
189 #endif
190 #else
191 #define	pmap_ste(m, v)	 (&((m)->pm_stab[(vaddr_t)(v) >> SG_ISHIFT]))
192 #define pmap_ste_v(m, v) (*pmap_ste(m, v) & SG_V)
193 #endif
194 
195 #define pmap_pte(m, v)	(&((m)->pm_ptab[(vaddr_t)(v) >> PG_SHIFT]))
196 #define pmap_pte_pa(pte)	(*(pte) & PG_FRAME)
197 #define pmap_pte_w(pte)		(*(pte) & PG_W)
198 #define pmap_pte_ci(pte)	(*(pte) & PG_CI)
199 #define pmap_pte_m(pte)		(*(pte) & PG_M)
200 #define pmap_pte_u(pte)		(*(pte) & PG_U)
201 #define pmap_pte_prot(pte)	(*(pte) & PG_PROT)
202 #define pmap_pte_v(pte)		(*(pte) & PG_V)
203 
204 #define pmap_pte_set_w(pte, v) \
205 	if (v) *(pte) |= PG_W; else *(pte) &= ~PG_W
206 #define pmap_pte_set_prot(pte, v) \
207 	if (v) *(pte) |= PG_PROT; else *(pte) &= ~PG_PROT
208 #define pmap_pte_w_chg(pte, nw)		((nw) ^ pmap_pte_w(pte))
209 #define pmap_pte_prot_chg(pte, np)	((np) ^ pmap_pte_prot(pte))
210 
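/*
 * Editor's note (added summary, not part of the original source):
 * the macros above walk the software copies of the translation tables.
 * pmap_ste*() index a pmap's segment table (two levels on the
 * 68040/68060, one level on the 68020/68030 with the 68851), pmap_pte()
 * indexes the pmap's linear page table, and the pmap_pte_*() accessors
 * test or modify individual PTE bits.  An illustrative fragment only,
 * with error handling and statistics updates omitted:
 *
 *	pt_entry_t *pte = pmap_pte(pmap, va);
 *	if (pmap_pte_v(pte) && !pmap_pte_w(pte))
 *		pmap_pte_set_w(pte, true);
 */
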
211 /*
212  * Given a map and a machine independent protection code,
213  * convert to an m68k protection code.
214  */
215 #define pte_prot(m, p)	(protection_codes[p])
216 u_int	protection_codes[8];
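/*
 * Editor's note (illustrative, not in the original source): once
 * pmap_bootstrap_finalize() below has filled in protection_codes[],
 * pte_prot() collapses a machine-independent protection into an m68k
 * PTE protection field (0, PG_RO, or PG_RW), e.g.
 *
 *	pte_prot(pmap, VM_PROT_READ)                  == PG_RO
 *	pte_prot(pmap, VM_PROT_READ|VM_PROT_WRITE)    == PG_RW
 *
 * The pmap argument is ignored; only the protection bits index the table.
 */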
217 
218 /*
219  * Kernel page table page management.
220  */
221 struct kpt_page {
222 	struct kpt_page *kpt_next;	/* link on either used or free list */
223 	vaddr_t		kpt_va;		/* always valid kernel VA */
224 	paddr_t		kpt_pa;		/* PA of this page (for speed) */
225 };
226 struct kpt_page *kpt_free_list, *kpt_used_list;
227 struct kpt_page *kpt_pages;
228 
229 /*
230  * Kernel segment/page table and page table map.
231  * The page table map gives us a level of indirection we need to dynamically
232  * expand the page table.  It is essentially a copy of the segment table
233  * with PTEs instead of STEs.  All are initialized in locore at boot time.
234  * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs.
235  * Segtabzero is an empty segment table which all processes share til they
236  * reference something.
237  */
238 paddr_t		Sysseg_pa;
239 st_entry_t	*Sysseg;
240 pt_entry_t	*Sysmap, *Sysptmap;
241 st_entry_t	*Segtabzero, *Segtabzeropa;
242 vsize_t		Sysptsize = VM_KERNEL_PT_PAGES;
243 
244 static struct pmap kernel_pmap_store;
245 struct pmap	*const kernel_pmap_ptr = &kernel_pmap_store;
246 struct vm_map	*st_map, *pt_map;
247 struct vm_map st_map_store, pt_map_store;
248 
249 vaddr_t		lwp0uarea;	/* lwp0 u-area VA, initialized in bootstrap */
250 
251 paddr_t		avail_start;	/* PA of first available physical page */
252 paddr_t		avail_end;	/* PA of last available physical page */
253 vsize_t		mem_size;	/* memory size in bytes */
254 vaddr_t		virtual_avail;  /* VA of first avail page (after kernel bss)*/
255 vaddr_t		virtual_end;	/* VA of last avail page (end of kernel AS) */
256 int		page_cnt;	/* number of pages managed by VM system */
257 
258 bool		pmap_initialized = false;	/* Has pmap_init completed? */
259 
260 vaddr_t		m68k_uptbase = M68K_PTBASE;
261 
262 struct pv_header {
263 	struct pv_entry		pvh_first;	/* first PV entry */
264 	uint16_t		pvh_attrs;	/* attributes:
265 						   bits 0-7: PTE bits
266 						   bits 8-15: flags */
267 	uint16_t		pvh_cimappings;	/* # caller-specified CI
268 						   mappings */
269 };
270 
271 #define	PVH_CI		0x10	/* all entries are cache-inhibited */
272 #define	PVH_PTPAGE	0x20	/* entry maps a page table page */
273 
274 struct pv_header *pv_table;
275 TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
276 int		pv_nfree;
277 
278 #ifdef CACHE_HAVE_VAC
279 u_int		pmap_aliasmask;	/* separation at which VA aliasing ok */
280 #endif
281 #if defined(M68040) || defined(M68060)
282 u_int		protostfree;	/* prototype (default) free ST map */
283 #endif
284 
285 pt_entry_t	*caddr1_pte;	/* PTE for CADDR1 */
286 pt_entry_t	*caddr2_pte;	/* PTE for CADDR2 */
287 
288 struct pool	pmap_pmap_pool;	/* memory pool for pmap structures */
289 struct pool	pmap_pv_pool;	/* memory pool for pv entries */
290 
291 #define pmap_alloc_pv()		pool_get(&pmap_pv_pool, PR_NOWAIT)
292 #define pmap_free_pv(pv)	pool_put(&pmap_pv_pool, (pv))
293 
294 #define	PAGE_IS_MANAGED(pa)	(pmap_initialized && uvm_pageismanaged(pa))
295 
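/*
 * pa_to_pvh:  (descriptive comment added editorially, not in the original)
 *
 *	Return the pv_header tracking all mappings of the managed page at
 *	physical address pa, by locating the physical segment containing
 *	pa and indexing that segment's pvheader array.
 */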
296 static inline struct pv_header *
297 pa_to_pvh(paddr_t pa)
298 {
299 	uvm_physseg_t bank = 0;	/* XXX gcc4 -Wuninitialized */
300 	psize_t pg = 0;
301 
302 	bank = uvm_physseg_find(atop((pa)), &pg);
303 	return &uvm_physseg_get_pmseg(bank)->pvheader[pg];
304 }
305 
306 /*
307  * Internal routines
308  */
309 void	pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, int,
310 			    struct pv_entry **);
311 bool	pmap_testbit(paddr_t, int);
312 bool	pmap_changebit(paddr_t, int, int);
313 int	pmap_enter_ptpage(pmap_t, vaddr_t, bool);
314 void	pmap_ptpage_addref(vaddr_t);
315 int	pmap_ptpage_delref(vaddr_t);
316 void	pmap_pinit(pmap_t);
317 void	pmap_release(pmap_t);
318 
319 #ifdef DEBUG
320 void pmap_pvdump(paddr_t);
321 void pmap_check_wiring(const char *, vaddr_t);
322 #endif
323 
324 /* pmap_remove_mapping flags */
325 #define	PRM_TFLUSH	0x01
326 #define	PRM_CFLUSH	0x02
327 #define	PRM_KEEPPTPAGE	0x04
328 
329 /*
330  * pmap_bootstrap_finalize:	[ INTERFACE ]
331  *
332  *	Initialize lwp0 uarea, curlwp, and curpcb after MMU is turned on,
333  *	using lwp0uarea variable saved during pmap_bootstrap().
334  */
335 void
336 pmap_bootstrap_finalize(void)
337 {
338 
339 #if !defined(amiga) && !defined(atari)
340 	/*
341 	 * XXX
342 	 * amiga and atari have different pmap initialization functions
343 	 * and they require this earlier.
344 	 */
345 	uvmexp.pagesize = NBPG;
346 	uvm_md_init();
347 #endif
348 
349 	/*
350 	 * Initialize protection array.
351 	 * XXX: Could this have port specific values? Can't this be static?
352 	 */
353 	protection_codes[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE]     = 0;
354 	protection_codes[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE]     = PG_RO;
355 	protection_codes[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE]  = PG_RO;
356 	protection_codes[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE]  = PG_RO;
357 	protection_codes[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE]    = PG_RW;
358 	protection_codes[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
359 	protection_codes[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE]    = PG_RW;
360 	protection_codes[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
361 
362 	/*
363 	 * Initialize pmap_kernel().
364 	 */
365 	pmap_kernel()->pm_stpa = (st_entry_t *)Sysseg_pa;
366 	pmap_kernel()->pm_stab = Sysseg;
367 	pmap_kernel()->pm_ptab = Sysmap;
368 #if defined(M68040) || defined(M68060)
369 	if (mmutype == MMU_68040)
370 		pmap_kernel()->pm_stfree = protostfree;
371 #endif
372 	pmap_kernel()->pm_count = 1;
373 
374 	/*
375 	 * Initialize lwp0 uarea, curlwp, and curpcb.
376 	 */
377 	memset((void *)lwp0uarea, 0, USPACE);
378 	uvm_lwp_setuarea(&lwp0, lwp0uarea);
379 	curlwp = &lwp0;
380 	curpcb = lwp_getpcb(&lwp0);
381 }
382 
383 /*
384  * pmap_virtual_space:		[ INTERFACE ]
385  *
386  *	Report the range of available kernel virtual address
387  *	space to the VM system during bootstrap.
388  *
389  *	This is only an interface function if we do not use
390  *	pmap_steal_memory()!
391  *
392  *	Note: no locking is necessary in this function.
393  */
394 void
395 pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
396 {
397 
398 	*vstartp = virtual_avail;
399 	*vendp = virtual_end;
400 }
401 
402 /*
403  * pmap_init:			[ INTERFACE ]
404  *
405  *	Initialize the pmap module.  Called by vm_init(), to initialize any
406  *	structures that the pmap system needs to map virtual memory.
407  *
408  *	Note: no locking is necessary in this function.
409  */
410 void
411 pmap_init(void)
412 {
413 	vaddr_t		addr, addr2;
414 	vsize_t		s;
415 	struct pv_header *pvh;
416 	int		rv;
417 	int		npages;
418 	uvm_physseg_t	bank;
419 
420 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_init()\n"));
421 
422 	/*
423 	 * Before we do anything else, initialize the PTE pointers
424 	 * used by pmap_zero_page() and pmap_copy_page().
425 	 */
426 	caddr1_pte = pmap_pte(pmap_kernel(), CADDR1);
427 	caddr2_pte = pmap_pte(pmap_kernel(), CADDR2);
428 
429 	PMAP_DPRINTF(PDB_INIT,
430 	    ("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n",
431 	    Sysseg, Sysmap, Sysptmap));
432 	PMAP_DPRINTF(PDB_INIT,
433 	    ("  pstart %lx, pend %lx, vstart %lx, vend %lx\n",
434 	    avail_start, avail_end, virtual_avail, virtual_end));
435 
436 	/*
437 	 * Allocate memory for random pmap data structures.  Includes the
438 	 * initial segment table, pv_head_table and pmap_attributes.
439 	 */
440 	for (page_cnt = 0, bank = uvm_physseg_get_first();
441 	     uvm_physseg_valid_p(bank);
442 	     bank = uvm_physseg_get_next(bank))
443 		page_cnt += uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
444 	s = M68K_STSIZE;					/* Segtabzero */
445 	s += page_cnt * sizeof(struct pv_header);	/* pv table */
446 	s = round_page(s);
447 	addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
448 	if (addr == 0)
449 		panic("pmap_init: can't allocate data structures");
450 
451 	Segtabzero = (st_entry_t *)addr;
452 	(void)pmap_extract(pmap_kernel(), addr,
453 	    (paddr_t *)(void *)&Segtabzeropa);
454 	addr += M68K_STSIZE;
455 
456 	pv_table = (struct pv_header *) addr;
457 	addr += page_cnt * sizeof(struct pv_header);
458 
459 	PMAP_DPRINTF(PDB_INIT, ("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) "
460 	    "tbl %p\n",
461 	    s, page_cnt, Segtabzero, Segtabzeropa,
462 	    pv_table));
463 
464 	/*
465 	 * Now that the pv and attribute tables have been allocated,
466 	 * assign them to the memory segments.
467 	 */
468 	pvh = pv_table;
469 	for (bank = uvm_physseg_get_first();
470 	     uvm_physseg_valid_p(bank);
471 	     bank = uvm_physseg_get_next(bank)) {
472 		npages = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
473 		uvm_physseg_get_pmseg(bank)->pvheader = pvh;
474 		pvh += npages;
475 	}
476 
477 	/*
478 	 * Allocate physical memory for kernel PT pages and their management.
479 	 * We need 1 PT page per possible task plus some slop.
480 	 */
481 	npages = uimin(atop(M68K_MAX_KPTSIZE), maxproc+16);
482 	s = ptoa(npages) + round_page(npages * sizeof(struct kpt_page));
483 
484 	/*
485 	 * Verify that space will be allocated in region for which
486 	 * we already have kernel PT pages.
487 	 */
488 	addr = 0;
489 	rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, 0,
490 	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
491 	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
492 	if (rv != 0 || (addr + s) >= (vaddr_t)Sysmap)
493 		panic("pmap_init: kernel PT too small");
494 	uvm_unmap(kernel_map, addr, addr + s);
495 
496 	/*
497 	 * Now allocate the space and link the pages together to
498 	 * form the KPT free list.
499 	 */
500 	addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
501 	if (addr == 0)
502 		panic("pmap_init: cannot allocate KPT free list");
503 	s = ptoa(npages);
504 	addr2 = addr + s;
505 	kpt_pages = &((struct kpt_page *)addr2)[npages];
506 	kpt_free_list = NULL;
507 	do {
508 		addr2 -= PAGE_SIZE;
509 		(--kpt_pages)->kpt_next = kpt_free_list;
510 		kpt_free_list = kpt_pages;
511 		kpt_pages->kpt_va = addr2;
512 		(void) pmap_extract(pmap_kernel(), addr2,
513 		    (paddr_t *)&kpt_pages->kpt_pa);
514 	} while (addr != addr2);
515 
516 	PMAP_DPRINTF(PDB_INIT, ("pmap_init: KPT: %ld pages from %lx to %lx\n",
517 	    atop(s), addr, addr + s));
518 
519 	/*
520 	 * Allocate the segment table map and the page table map.
521 	 */
522 	s = maxproc * M68K_STSIZE;
523 	st_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0, false,
524 	    &st_map_store);
525 
526 	addr = m68k_uptbase;
527 	if ((M68K_PTMAXSIZE / M68K_MAX_PTSIZE) < maxproc) {
528 		s = M68K_PTMAXSIZE;
529 		/*
530 		 * XXX We don't want to hang when we run out of
531 		 * page tables, so we lower maxproc so that fork()
532 		 * will fail instead.  Note that root could still raise
533 		 * this value via sysctl(3).
534 		 */
535 		maxproc = (M68K_PTMAXSIZE / M68K_MAX_PTSIZE);
536 	} else
537 		s = (maxproc * M68K_MAX_PTSIZE);
538 	pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0,
539 	    true, &pt_map_store);
540 
541 #if defined(M68040) || defined(M68060)
542 	if (mmutype == MMU_68040) {
543 		protostfree = ~l2tobm(0);
544 		for (rv = MAXUL2SIZE; rv < sizeof(protostfree)*NBBY; rv++)
545 			protostfree &= ~l2tobm(rv);
546 	}
547 #endif
548 
549 	/*
550 	 * Initialize the pmap pools.
551 	 */
552 	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
553 	    &pool_allocator_nointr, IPL_NONE);
554 
555 	/*
556 	 * Initialize the pv_entry pools.
557 	 */
558 	pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
559 	    &pool_allocator_meta, IPL_NONE);
560 
561 	/*
562 	 * Now that this is done, mark the pages shared with the
563 	 * hardware page table search as non-CCB (actually, as CI).
564 	 *
565 	 * XXX Hm. Given that this is in the kernel map, can't we just
566 	 * use the va's?
567 	 */
568 #ifdef M68060
569 #if defined(M68020) || defined(M68030) || defined(M68040)
570 	if (cputype == CPU_68060)
571 #endif
572 	{
573 		struct kpt_page *kptp = kpt_free_list;
574 		paddr_t paddr;
575 
576 		while (kptp) {
577 			pmap_changebit(kptp->kpt_pa, PG_CI, ~PG_CCB);
578 			kptp = kptp->kpt_next;
579 		}
580 
581 		paddr = (paddr_t)Segtabzeropa;
582 		while (paddr < (paddr_t)Segtabzeropa + M68K_STSIZE) {
583 			pmap_changebit(paddr, PG_CI, ~PG_CCB);
584 			paddr += PAGE_SIZE;
585 		}
586 
587 		DCIS();
588 	}
589 #endif
590 
591 	/*
592 	 * Now it is safe to enable pv_table recording.
593 	 */
594 	pmap_initialized = true;
595 }
596 
597 /*
598  * pmap_map:
599  *
600  *	Used to map a range of physical addresses into kernel
601  *	virtual address space.
602  *
603  *	For now, VM is already on, we only need to map the
604  *	specified memory.
605  *
606  *	Note: THIS FUNCTION IS DEPRECATED, AND SHOULD BE REMOVED!
607  */
608 vaddr_t
609 pmap_map(vaddr_t va, paddr_t spa, paddr_t epa, int prot)
610 {
611 
612 	PMAP_DPRINTF(PDB_FOLLOW,
613 	    ("pmap_map(%lx, %lx, %lx, %x)\n", va, spa, epa, prot));
614 
615 	while (spa < epa) {
616 		pmap_enter(pmap_kernel(), va, spa, prot, 0);
617 		va += PAGE_SIZE;
618 		spa += PAGE_SIZE;
619 	}
620 	pmap_update(pmap_kernel());
621 	return va;
622 }
623 
624 /*
625  * pmap_create:			[ INTERFACE ]
626  *
627  *	Create and return a physical map.
628  *
629  *	Note: no locking is necessary in this function.
630  */
631 pmap_t
632 pmap_create(void)
633 {
634 	struct pmap *pmap;
635 
636 	PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
637 	    ("pmap_create()\n"));
638 
639 	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
640 	memset(pmap, 0, sizeof(*pmap));
641 	pmap_pinit(pmap);
642 	return pmap;
643 }
644 
645 /*
646  * pmap_pinit:
647  *
648  *	Initialize a preallocated and zeroed pmap structure.
649  *
650  *	Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_create()!
651  */
652 void
653 pmap_pinit(struct pmap *pmap)
654 {
655 
656 	PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
657 	    ("pmap_pinit(%p)\n", pmap));
658 
659 	/*
660 	 * No need to allocate page table space yet but we do need a
661 	 * valid segment table.  Initially, we point everyone at the
662 	 * "null" segment table.  On the first pmap_enter, a real
663 	 * segment table will be allocated.
664 	 */
665 	pmap->pm_stab = Segtabzero;
666 	pmap->pm_stpa = Segtabzeropa;
667 #if defined(M68040) || defined(M68060)
668 #if defined(M68020) || defined(M68030)
669 	if (mmutype == MMU_68040)
670 #endif
671 		pmap->pm_stfree = protostfree;
672 #endif
673 	pmap->pm_count = 1;
674 }
675 
676 /*
677  * pmap_destroy:		[ INTERFACE ]
678  *
679  *	Drop the reference count on the specified pmap, releasing
680  *	all resources if the reference count drops to zero.
681  */
682 void
683 pmap_destroy(pmap_t pmap)
684 {
685 	int count;
686 
687 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_destroy(%p)\n", pmap));
688 
689 	count = atomic_dec_uint_nv(&pmap->pm_count);
690 	if (count == 0) {
691 		pmap_release(pmap);
692 		pool_put(&pmap_pmap_pool, pmap);
693 	}
694 }
695 
696 /*
697  * pmap_release:
698  *
699  *	Release the resources held by a pmap.
700  *
701  *	Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_destroy().
702  */
703 void
704 pmap_release(pmap_t pmap)
705 {
706 
707 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_release(%p)\n", pmap));
708 
709 #ifdef notdef /* DIAGNOSTIC */
710 	/* count would be 0 from pmap_destroy... */
711 	if (pmap->pm_count != 1)
712 		panic("pmap_release count");
713 #endif
714 
715 	if (pmap->pm_ptab) {
716 		pmap_remove(pmap_kernel(), (vaddr_t)pmap->pm_ptab,
717 		    (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE);
718 		uvm_km_pgremove((vaddr_t)pmap->pm_ptab,
719 		    (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE);
720 		uvm_km_free(pt_map, (vaddr_t)pmap->pm_ptab,
721 		    M68K_MAX_PTSIZE, UVM_KMF_VAONLY);
722 	}
723 	KASSERT(pmap->pm_stab == Segtabzero);
724 }
725 
726 /*
727  * pmap_reference:		[ INTERFACE ]
728  *
729  *	Add a reference to the specified pmap.
730  */
731 void
732 pmap_reference(pmap_t pmap)
733 {
734 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_reference(%p)\n", pmap));
735 
736 	atomic_inc_uint(&pmap->pm_count);
737 }
738 
739 /*
740  * pmap_activate:		[ INTERFACE ]
741  *
742  *	Activate the pmap used by the specified process.  This includes
743  *	reloading the MMU context if the current process, and marking
744  *	the pmap in use by the processor.
745  *
746  *	Note: we may only use spin locks here, since we are called
747  *	by a critical section in cpu_switch()!
748  */
749 void
750 pmap_activate(struct lwp *l)
751 {
752 	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
753 
754 	PMAP_DPRINTF(PDB_FOLLOW|PDB_SEGTAB,
755 	    ("pmap_activate(%p)\n", l));
756 
757 	PMAP_ACTIVATE(pmap, (curlwp->l_flag & LW_IDLE) != 0 ||
758 	    l->l_proc == curproc);
759 }
760 
761 /*
762  * pmap_deactivate:		[ INTERFACE ]
763  *
764  *	Mark that the pmap used by the specified process is no longer
765  *	in use by the processor.
766  *
767  *	The comment above pmap_activate() wrt. locking applies here,
768  *	as well.
769  */
770 void
771 pmap_deactivate(struct lwp *l)
772 {
773 
774 	/* No action necessary in this pmap implementation. */
775 }
776 
777 /*
778  * pmap_remove:			[ INTERFACE ]
779  *
780  *	Remove the given range of addresses from the specified map.
781  *
782  *	It is assumed that the start and end are properly
783  *	rounded to the page size.
784  */
785 void
786 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
787 {
788 	vaddr_t nssva;
789 	pt_entry_t *pte;
790 	int flags;
791 #ifdef CACHE_HAVE_VAC
792 	bool firstpage = true, needcflush = false;
793 #endif
794 
795 	PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
796 	    ("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva));
797 
798 	flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
799 	while (sva < eva) {
800 		nssva = m68k_trunc_seg(sva) + NBSEG;
801 		if (nssva == 0 || nssva > eva)
802 			nssva = eva;
803 
804 		/*
805 		 * Invalidate every valid mapping within this segment.
806 		 */
807 
808 		pte = pmap_pte(pmap, sva);
809 		while (sva < nssva) {
810 
811 			/*
812 			 * If this segment is unallocated,
813 			 * skip to the next segment boundary.
814 			 */
815 
816 			if (!pmap_ste_v(pmap, sva)) {
817 				sva = nssva;
818 				break;
819 			}
820 
821 			if (pmap_pte_v(pte)) {
822 #ifdef CACHE_HAVE_VAC
823 				if (pmap_aliasmask) {
824 
825 					/*
826 					 * Purge kernel side of VAC to ensure
827 					 * we get the correct state of any
828 					 * hardware maintained bits.
829 					 */
830 
831 					if (firstpage) {
832 						DCIS();
833 					}
834 
835 					/*
836 					 * Remember if we may need to
837 					 * flush the VAC due to a non-CI
838 					 * mapping.
839 					 */
840 
841 					if (!needcflush && !pmap_pte_ci(pte))
842 						needcflush = true;
843 
844 				}
845 				firstpage = false;
846 #endif
847 				pmap_remove_mapping(pmap, sva, pte, flags, NULL);
848 			}
849 			pte++;
850 			sva += PAGE_SIZE;
851 		}
852 	}
853 
854 #ifdef CACHE_HAVE_VAC
855 
856 	/*
857 	 * Didn't do anything, no need for cache flushes
858 	 */
859 
860 	if (firstpage)
861 		return;
862 
863 	/*
864 	 * In a couple of cases, we don't need to worry about flushing
865 	 * the VAC:
866 	 * 	1. if this is a kernel mapping,
867 	 *	   we have already done it
868 	 *	2. if it is a user mapping not for the current process,
869 	 *	   it won't be there
870 	 */
871 
872 	if (pmap_aliasmask && !active_user_pmap(pmap))
873 		needcflush = false;
874 	if (needcflush) {
875 		if (pmap == pmap_kernel()) {
876 			DCIS();
877 		} else {
878 			DCIU();
879 		}
880 	}
881 #endif
882 }
883 
884 /*
885  * pmap_page_protect:		[ INTERFACE ]
886  *
887  *	Lower the permission for all mappings to a given page to
888  *	the permissions specified.
889  */
890 void
891 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
892 {
893 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
894 	struct pv_header *pvh;
895 	struct pv_entry *pv;
896 	pt_entry_t *pte;
897 	int s;
898 
899 #ifdef DEBUG
900 	if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
901 	    (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
902 		printf("pmap_page_protect(%p, %x)\n", pg, prot);
903 #endif
904 
905 	switch (prot) {
906 	case VM_PROT_READ|VM_PROT_WRITE:
907 	case VM_PROT_ALL:
908 		return;
909 
910 	/* copy_on_write */
911 	case VM_PROT_READ:
912 	case VM_PROT_READ|VM_PROT_EXECUTE:
913 		pmap_changebit(pa, PG_RO, ~0);
914 		return;
915 
916 	/* remove_all */
917 	default:
918 		break;
919 	}
920 
921 	pvh = pa_to_pvh(pa);
922 	pv = &pvh->pvh_first;
923 	s = splvm();
924 	while (pv->pv_pmap != NULL) {
925 
926 		pte = pmap_pte(pv->pv_pmap, pv->pv_va);
927 #ifdef DEBUG
928 		if (!pmap_ste_v(pv->pv_pmap, pv->pv_va) ||
929 		    pmap_pte_pa(pte) != pa)
930 			panic("pmap_page_protect: bad mapping");
931 #endif
932 		pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
933 		    pte, PRM_TFLUSH|PRM_CFLUSH, NULL);
934 	}
935 	splx(s);
936 }
937 
938 /*
939  * pmap_protect:		[ INTERFACE ]
940  *
941  *	Set the physical protection on the specified range of this map
942  *	as requested.
943  */
944 void
945 pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
946 {
947 	vaddr_t nssva;
948 	pt_entry_t *pte;
949 	bool firstpage __unused, needtflush;
950 	int isro;
951 
952 	PMAP_DPRINTF(PDB_FOLLOW|PDB_PROTECT,
953 	    ("pmap_protect(%p, %lx, %lx, %x)\n",
954 	    pmap, sva, eva, prot));
955 
956 #ifdef PMAPSTATS
957 	protect_stats.calls++;
958 #endif
959 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
960 		pmap_remove(pmap, sva, eva);
961 		return;
962 	}
963 	isro = pte_prot(pmap, prot);
964 	needtflush = active_pmap(pmap);
965 	firstpage = true;
966 	while (sva < eva) {
967 		nssva = m68k_trunc_seg(sva) + NBSEG;
968 		if (nssva == 0 || nssva > eva)
969 			nssva = eva;
970 
971 		/*
972 		 * If VA belongs to an unallocated segment,
973 		 * skip to the next segment boundary.
974 		 */
975 
976 		if (!pmap_ste_v(pmap, sva)) {
977 			sva = nssva;
978 			continue;
979 		}
980 
981 		/*
982 		 * Change protection on mapping if it is valid and doesn't
983 		 * already have the correct protection.
984 		 */
985 
986 		pte = pmap_pte(pmap, sva);
987 		while (sva < nssva) {
988 			if (pmap_pte_v(pte) && pmap_pte_prot_chg(pte, isro)) {
989 #ifdef CACHE_HAVE_VAC
990 
991 				/*
992 				 * Purge kernel side of VAC to ensure we
993 				 * get the correct state of any hardware
994 				 * maintained bits.
995 				 *
996 				 * XXX do we need to clear the VAC in
997 				 * general to reflect the new protection?
998 				 */
999 
1000 				if (firstpage && pmap_aliasmask)
1001 					DCIS();
1002 #endif
1003 
1004 #if defined(M68040) || defined(M68060)
1005 
1006 				/*
1007 				 * Clear caches if making RO (see section
1008 				 * "7.3 Cache Coherency" in the manual).
1009 				 */
1010 
1011 #if defined(M68020) || defined(M68030)
1012 				if (isro && mmutype == MMU_68040)
1013 #else
1014 				if (isro)
1015 #endif
1016 				{
1017 					paddr_t pa = pmap_pte_pa(pte);
1018 
1019 					DCFP(pa);
1020 					ICPP(pa);
1021 				}
1022 #endif
1023 				pmap_pte_set_prot(pte, isro);
1024 				if (needtflush)
1025 					TBIS(sva);
1026 				firstpage = false;
1027 			}
1028 			pte++;
1029 			sva += PAGE_SIZE;
1030 		}
1031 	}
1032 }
1033 
1034 /*
1035  * pmap_enter:			[ INTERFACE ]
1036  *
1037  *	Insert the given physical page (pa) at
1038  *	the specified virtual address (va) in the
1039  *	target physical map with the protection requested.
1040  *
1041  *	If specified, the page will be wired down, meaning
1042  *	that the related pte cannot be reclaimed.
1043  *
1044  *	Note: This is the only routine which MAY NOT lazy-evaluate
1045  *	or lose information.  That is, this routine must actually
1046  *	insert this page into the given map NOW.
1047  */
1048 int
1049 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1050 {
1051 	pt_entry_t *pte;
1052 	struct pv_entry *opv = NULL;
1053 	int npte;
1054 	paddr_t opa;
1055 	bool cacheable = true;
1056 	bool checkpv = true;
1057 	bool wired = (flags & PMAP_WIRED) != 0;
1058 	bool can_fail = (flags & PMAP_CANFAIL) != 0;
1059 
1060 	PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
1061 	    ("pmap_enter(%p, %lx, %lx, %x, %x)\n",
1062 	    pmap, va, pa, prot, wired));
1063 
1064 #ifdef DIAGNOSTIC
1065 	/*
1066 	 * pmap_enter() should never be used for CADDR1 and CADDR2.
1067 	 */
1068 	if (pmap == pmap_kernel() &&
1069 	    (va == (vaddr_t)CADDR1 || va == (vaddr_t)CADDR2))
1070 		panic("pmap_enter: used for CADDR1 or CADDR2");
1071 #endif
1072 
1073 	/*
1074 	 * For user mapping, allocate kernel VM resources if necessary.
1075 	 */
1076 	if (pmap->pm_ptab == NULL) {
1077 		pmap->pm_ptab = (pt_entry_t *)
1078 		    uvm_km_alloc(pt_map, M68K_MAX_PTSIZE, 0,
1079 		    UVM_KMF_VAONLY |
1080 		    (can_fail ? UVM_KMF_NOWAIT : UVM_KMF_WAITVA));
1081 		if (pmap->pm_ptab == NULL)
1082 			return ENOMEM;
1083 	}
1084 
1085 	/*
1086 	 * Segment table entry not valid, we need a new PT page
1087 	 */
1088 	if (!pmap_ste_v(pmap, va)) {
1089 		int err = pmap_enter_ptpage(pmap, va, can_fail);
1090 		if (err)
1091 			return err;
1092 	}
1093 
1094 	pa = m68k_trunc_page(pa);
1095 	pte = pmap_pte(pmap, va);
1096 	opa = pmap_pte_pa(pte);
1097 
1098 	PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
1099 
1100 	/*
1101 	 * Mapping has not changed, must be protection or wiring change.
1102 	 */
1103 	if (opa == pa) {
1104 		/*
1105 		 * Wiring change, just update stats.
1106 		 * We don't worry about wiring PT pages as they remain
1107 		 * resident as long as there are valid mappings in them.
1108 		 * Hence, if a user page is wired, the PT page will be also.
1109 		 */
1110 		if (pmap_pte_w_chg(pte, wired ? PG_W : 0)) {
1111 			PMAP_DPRINTF(PDB_ENTER,
1112 			    ("enter: wiring change -> %x\n", wired));
1113 			if (wired)
1114 				pmap->pm_stats.wired_count++;
1115 			else
1116 				pmap->pm_stats.wired_count--;
1117 		}
1118 		/*
1119 		 * Retain cache inhibition status
1120 		 */
1121 		checkpv = false;
1122 		if (pmap_pte_ci(pte))
1123 			cacheable = false;
1124 		goto validate;
1125 	}
1126 
1127 	/*
1128 	 * Mapping has changed, invalidate old range and fall through to
1129 	 * handle validating new mapping.
1130 	 */
1131 	if (opa) {
1132 		PMAP_DPRINTF(PDB_ENTER,
1133 		    ("enter: removing old mapping %lx\n", va));
1134 		pmap_remove_mapping(pmap, va, pte,
1135 		    PRM_TFLUSH|PRM_CFLUSH|PRM_KEEPPTPAGE, &opv);
1136 	}
1137 
1138 	/*
1139 	 * If this is a new user mapping, increment the wiring count
1140 	 * on this PT page.  PT pages are wired down as long as there
1141 	 * is a valid mapping in the page.
1142 	 */
1143 	if (pmap != pmap_kernel())
1144 		pmap_ptpage_addref(trunc_page((vaddr_t)pte));
1145 
1146 	/*
1147 	 * Enter on the PV list if part of our managed memory
1148 	 * Note that we raise IPL while manipulating pv_table
1149 	 * since pmap_enter can be called at interrupt time.
1150 	 */
1151 	if (PAGE_IS_MANAGED(pa)) {
1152 		struct pv_header *pvh;
1153 		struct pv_entry *pv, *npv;
1154 		int s;
1155 
1156 		pvh = pa_to_pvh(pa);
1157 		pv = &pvh->pvh_first;
1158 		s = splvm();
1159 
1160 		PMAP_DPRINTF(PDB_ENTER,
1161 		    ("enter: pv at %p: %lx/%p/%p\n",
1162 		    pv, pv->pv_va, pv->pv_pmap, pv->pv_next));
1163 		/*
1164 		 * No entries yet, use header as the first entry
1165 		 */
1166 		if (pv->pv_pmap == NULL) {
1167 			pv->pv_va = va;
1168 			pv->pv_pmap = pmap;
1169 			pv->pv_next = NULL;
1170 			pv->pv_ptste = NULL;
1171 			pv->pv_ptpmap = NULL;
1172 			pvh->pvh_attrs = 0;
1173 		}
1174 		/*
1175 		 * There is at least one other VA mapping this page.
1176 		 * Place this entry after the header.
1177 		 */
1178 		else {
1179 #ifdef DEBUG
1180 			for (npv = pv; npv; npv = npv->pv_next)
1181 				if (pmap == npv->pv_pmap && va == npv->pv_va)
1182 					panic("pmap_enter: already in pv_tab");
1183 #endif
1184 			if (opv != NULL) {
1185 				npv = opv;
1186 				opv = NULL;
1187 			} else {
1188 				npv = pmap_alloc_pv();
1189 			}
1190 			KASSERT(npv != NULL);
1191 			npv->pv_va = va;
1192 			npv->pv_pmap = pmap;
1193 			npv->pv_next = pv->pv_next;
1194 			npv->pv_ptste = NULL;
1195 			npv->pv_ptpmap = NULL;
1196 			pv->pv_next = npv;
1197 
1198 #ifdef CACHE_HAVE_VAC
1199 
1200 			/*
1201 			 * Since there is another logical mapping for the
1202 			 * same page we may need to cache-inhibit the
1203 			 * descriptors on those CPUs with external VACs.
1204 			 * We don't need to CI if:
1205 			 *
1206 			 * - No two mappings belong to the same user pmaps.
1207 			 *   Since the cache is flushed on context switches
1208 			 *   there is no problem between user processes.
1209 			 *
1210 			 * - Mappings within a single pmap are a certain
1211 			 *   magic distance apart.  VAs at these appropriate
1212 			 *   boundaries map to the same cache entries or
1213 			 *   otherwise don't conflict.
1214 			 *
1215 			 * To keep it simple, we only check for these special
1216 			 * cases if there are only two mappings, otherwise we
1217 			 * punt and always CI.
1218 			 *
1219 			 * Note that there are no aliasing problems with the
1220 			 * on-chip data-cache when the WA bit is set.
1221 			 */
1222 
1223 			if (pmap_aliasmask) {
1224 				if (pvh->pvh_attrs & PVH_CI) {
1225 					PMAP_DPRINTF(PDB_CACHE,
1226 					    ("enter: pa %lx already CI'ed\n",
1227 					    pa));
1228 					checkpv = cacheable = false;
1229 				} else if (npv->pv_next ||
1230 					   ((pmap == pv->pv_pmap ||
1231 					     pmap == pmap_kernel() ||
1232 					     pv->pv_pmap == pmap_kernel()) &&
1233 					    ((pv->pv_va & pmap_aliasmask) !=
1234 					     (va & pmap_aliasmask)))) {
1235 					PMAP_DPRINTF(PDB_CACHE,
1236 					    ("enter: pa %lx CI'ing all\n",
1237 					    pa));
1238 					cacheable = false;
1239 					pvh->pvh_attrs |= PVH_CI;
1240 				}
1241 			}
1242 #endif
1243 		}
1244 
1245 		/*
1246 		 * Speed pmap_is_referenced() or pmap_is_modified() based
1247 		 * on the hint provided in access_type.
1248 		 */
1249 #ifdef DIAGNOSTIC
1250 		if ((flags & VM_PROT_ALL) & ~prot)
1251 			panic("pmap_enter: access_type exceeds prot");
1252 #endif
1253 		if (flags & VM_PROT_WRITE)
1254 			pvh->pvh_attrs |= (PG_U|PG_M);
1255 		else if (flags & VM_PROT_ALL)
1256 			pvh->pvh_attrs |= PG_U;
1257 
1258 		splx(s);
1259 	}
1260 	/*
1261 	 * Assumption: if it is not part of our managed memory
1262 	 * then it must be device memory which may be volitile.
1263 	 */
1264 	else if (pmap_initialized) {
1265 		checkpv = cacheable = false;
1266 	}
1267 
1268 	/*
1269 	 * Increment counters
1270 	 */
1271 	pmap->pm_stats.resident_count++;
1272 	if (wired)
1273 		pmap->pm_stats.wired_count++;
1274 
1275 validate:
1276 #ifdef CACHE_HAVE_VAC
1277 	/*
1278 	 * Purge kernel side of VAC to ensure we get correct state
1279 	 * of HW bits so we don't clobber them.
1280 	 */
1281 	if (pmap_aliasmask)
1282 		DCIS();
1283 #endif
1284 
1285 	/*
1286 	 * Build the new PTE.
1287 	 */
1288 
1289 	npte = pa | pte_prot(pmap, prot) | (*pte & (PG_M|PG_U)) | PG_V;
1290 	if (wired)
1291 		npte |= PG_W;
1292 	if (!checkpv && !cacheable)
1293 #if defined(M68040) || defined(M68060)
1294 #if defined(M68020) || defined(M68030)
1295 		npte |= (mmutype == MMU_68040 ? PG_CIN : PG_CI);
1296 #else
1297 		npte |= PG_CIN;
1298 #endif
1299 #else
1300 		npte |= PG_CI;
1301 #endif
1302 #if defined(M68040) || defined(M68060)
1303 #if defined(M68020) || defined(M68030)
1304 	else if (mmutype == MMU_68040 && (npte & (PG_PROT|PG_CI)) == PG_RW)
1305 #else
1306 	else if ((npte & (PG_PROT|PG_CI)) == PG_RW)
1307 #endif
1308 		npte |= PG_CCB;
1309 #endif
1310 
1311 	PMAP_DPRINTF(PDB_ENTER, ("enter: new pte value %x\n", npte));
1312 
1313 	/*
1314 	 * Remember if this was a wiring-only change.
1315 	 * If so, we need not flush the TLB and caches.
1316 	 */
1317 
1318 	wired = ((*pte ^ npte) == PG_W);
1319 #if defined(M68040) || defined(M68060)
1320 #if defined(M68020) || defined(M68030)
1321 	if (mmutype == MMU_68040 && !wired)
1322 #else
1323 	if (!wired)
1324 #endif
1325 	{
1326 		DCFP(pa);
1327 		ICPP(pa);
1328 	}
1329 #endif
1330 	*pte = npte;
1331 	if (!wired && active_pmap(pmap))
1332 		TBIS(va);
1333 #ifdef CACHE_HAVE_VAC
1334 	/*
1335 	 * The following is executed if we are entering a second
1336 	 * (or greater) mapping for a physical page and the mappings
1337 	 * may create an aliasing problem.  In this case we must
1338 	 * cache inhibit the descriptors involved and flush any
1339 	 * external VAC.
1340 	 */
1341 	if (checkpv && !cacheable) {
1342 		pmap_changebit(pa, PG_CI, ~0);
1343 		DCIA();
1344 #ifdef DEBUG
1345 		if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
1346 		    (PDB_CACHE|PDB_PVDUMP))
1347 			pmap_pvdump(pa);
1348 #endif
1349 	}
1350 #endif
1351 #ifdef DEBUG
1352 	if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel())
1353 		pmap_check_wiring("enter", trunc_page((vaddr_t)pte));
1354 #endif
1355 
1356 	if (opv != NULL)
1357 		pmap_free_pv(opv);
1358 
1359 	return 0;
1360 }
1361 
1362 void
1363 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1364 {
1365 	pmap_t pmap = pmap_kernel();
1366 	pt_entry_t *pte;
1367 	int s, npte;
1368 
1369 	PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
1370 	    ("pmap_kenter_pa(%lx, %lx, %x)\n", va, pa, prot));
1371 
1372 	/*
1373 	 * Segment table entry not valid, we need a new PT page
1374 	 */
1375 
1376 	if (!pmap_ste_v(pmap, va)) {
1377 		s = splvm();
1378 		pmap_enter_ptpage(pmap, va, false);
1379 		splx(s);
1380 	}
1381 
1382 	pa = m68k_trunc_page(pa);
1383 	pte = pmap_pte(pmap, va);
1384 
1385 	PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
1386 	KASSERT(!pmap_pte_v(pte));
1387 
1388 	/*
1389 	 * Increment counters
1390 	 */
1391 
1392 	pmap->pm_stats.resident_count++;
1393 	pmap->pm_stats.wired_count++;
1394 
1395 	/*
1396 	 * Build the new PTE.
1397 	 */
1398 
1399 	npte = pa | pte_prot(pmap, prot) | PG_V | PG_W;
1400 #if defined(M68040) || defined(M68060)
1401 #if defined(M68020) || defined(M68030)
1402 	if (mmutype == MMU_68040 && (npte & PG_PROT) == PG_RW)
1403 #else
1404 	if ((npte & PG_PROT) == PG_RW)
1405 #endif
1406 		npte |= PG_CCB;
1407 
1408 	if (mmutype == MMU_68040) {
1409 		DCFP(pa);
1410 		ICPP(pa);
1411 	}
1412 #endif
1413 
1414 	*pte = npte;
1415 	TBIS(va);
1416 }
1417 
1418 void
1419 pmap_kremove(vaddr_t va, vsize_t size)
1420 {
1421 	pmap_t pmap = pmap_kernel();
1422 	pt_entry_t *pte;
1423 	vaddr_t nssva;
1424 	vaddr_t eva = va + size;
1425 #ifdef CACHE_HAVE_VAC
1426 	bool firstpage, needcflush;
1427 #endif
1428 
1429 	PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
1430 	    ("pmap_kremove(%lx, %lx)\n", va, size));
1431 
1432 #ifdef CACHE_HAVE_VAC
1433 	firstpage = true;
1434 	needcflush = false;
1435 #endif
1436 	while (va < eva) {
1437 		nssva = m68k_trunc_seg(va) + NBSEG;
1438 		if (nssva == 0 || nssva > eva)
1439 			nssva = eva;
1440 
1441 		/*
1442 		 * If VA belongs to an unallocated segment,
1443 		 * skip to the next segment boundary.
1444 		 */
1445 
1446 		if (!pmap_ste_v(pmap, va)) {
1447 			va = nssva;
1448 			continue;
1449 		}
1450 
1451 		/*
1452 		 * Invalidate every valid mapping within this segment.
1453 		 */
1454 
1455 		pte = pmap_pte(pmap, va);
1456 		while (va < nssva) {
1457 			if (!pmap_pte_v(pte)) {
1458 				pte++;
1459 				va += PAGE_SIZE;
1460 				continue;
1461 			}
1462 #ifdef CACHE_HAVE_VAC
1463 			if (pmap_aliasmask) {
1464 
1465 				/*
1466 				 * Purge kernel side of VAC to ensure
1467 				 * we get the correct state of any
1468 				 * hardware maintained bits.
1469 				 */
1470 
1471 				if (firstpage) {
1472 					DCIS();
1473 					firstpage = false;
1474 				}
1475 
1476 				/*
1477 				 * Remember if we may need to
1478 				 * flush the VAC.
1479 				 */
1480 
1481 				needcflush = true;
1482 			}
1483 #endif
1484 			pmap->pm_stats.wired_count--;
1485 			pmap->pm_stats.resident_count--;
1486 			*pte = PG_NV;
1487 			TBIS(va);
1488 			pte++;
1489 			va += PAGE_SIZE;
1490 		}
1491 	}
1492 
1493 #ifdef CACHE_HAVE_VAC
1494 
1495 	/*
1496 	 * In a couple of cases, we don't need to worry about flushing
1497 	 * the VAC:
1498 	 * 	1. if this is a kernel mapping,
1499 	 *	   we have already done it
1500 	 *	2. if it is a user mapping not for the current process,
1501 	 *	   it won't be there
1502 	 */
1503 
1504 	if (pmap_aliasmask && !active_user_pmap(pmap))
1505 		needcflush = false;
1506 	if (needcflush) {
1507 		if (pmap == pmap_kernel()) {
1508 			DCIS();
1509 		} else {
1510 			DCIU();
1511 		}
1512 	}
1513 #endif
1514 }
1515 
1516 /*
1517  * pmap_unwire:			[ INTERFACE ]
1518  *
1519  *	Clear the wired attribute for a map/virtual-address pair.
1520  *
1521  *	The mapping must already exist in the pmap.
1522  */
1523 void
1524 pmap_unwire(pmap_t pmap, vaddr_t va)
1525 {
1526 	pt_entry_t *pte;
1527 
1528 	PMAP_DPRINTF(PDB_FOLLOW,
1529 	    ("pmap_unwire(%p, %lx)\n", pmap, va));
1530 
1531 	pte = pmap_pte(pmap, va);
1532 
1533 	/*
1534 	 * If wiring actually changed (always?) clear the wire bit and
1535 	 * update the wire count.  Note that wiring is not a hardware
1536 	 * characteristic so there is no need to invalidate the TLB.
1537 	 */
1538 
1539 	if (pmap_pte_w_chg(pte, 0)) {
1540 		pmap_pte_set_w(pte, false);
1541 		pmap->pm_stats.wired_count--;
1542 	}
1543 }
1544 
1545 /*
1546  * pmap_extract:		[ INTERFACE ]
1547  *
1548  *	Extract the physical address associated with the given
1549  *	pmap/virtual address pair.
1550  */
1551 bool
1552 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
1553 {
1554 	paddr_t pa;
1555 	u_int pte;
1556 
1557 	PMAP_DPRINTF(PDB_FOLLOW,
1558 	    ("pmap_extract(%p, %lx) -> ", pmap, va));
1559 
1560 	if (pmap_ste_v(pmap, va)) {
1561 		pte = *(u_int *)pmap_pte(pmap, va);
1562 		if (pte) {
1563 			pa = (pte & PG_FRAME) | (va & ~PG_FRAME);
1564 			if (pap != NULL)
1565 				*pap = pa;
1566 #ifdef DEBUG
1567 			if (pmapdebug & PDB_FOLLOW)
1568 				printf("%lx\n", pa);
1569 #endif
1570 			return true;
1571 		}
1572 	}
1573 #ifdef DEBUG
1574 	if (pmapdebug & PDB_FOLLOW)
1575 		printf("failed\n");
1576 #endif
1577 	return false;
1578 }
1579 
1580 /*
1581  * pmap_copy:		[ INTERFACE ]
1582  *
1583  *	Copy the mapping range specified by src_addr/len
1584  *	from the source map to the range dst_addr/len
1585  *	in the destination map.
1586  *
1587  *	This routine is only advisory and need not do anything.
1588  */
1589 void
1590 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
1591     vaddr_t src_addr)
1592 {
1593 
1594 	PMAP_DPRINTF(PDB_FOLLOW,
1595 	    ("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
1596 	    dst_pmap, src_pmap, dst_addr, len, src_addr));
1597 }
1598 
1599 /*
1600  * pmap_collect1():
1601  *
1602  *	Garbage-collect KPT pages.  Helper for the above (bogus)
1603  *	pmap_collect().
1604  *
1605  *	Note: THIS SHOULD GO AWAY, AND BE REPLACED WITH A BETTER
1606  *	WAY OF HANDLING PT PAGES!
1607  */
1608 static inline void
1609 pmap_collect1(pmap_t pmap, paddr_t startpa, paddr_t endpa)
1610 {
1611 	paddr_t pa;
1612 	struct pv_header *pvh;
1613 	struct pv_entry *pv;
1614 	pt_entry_t *pte;
1615 	paddr_t kpa;
1616 #ifdef DEBUG
1617 	st_entry_t *ste;
1618 	int opmapdebug = 0;
1619 #endif
1620 
1621 	for (pa = startpa; pa < endpa; pa += PAGE_SIZE) {
1622 		struct kpt_page *kpt, **pkpt;
1623 
1624 		/*
1625 		 * Locate physical pages which are being used as kernel
1626 		 * page table pages.
1627 		 */
1628 
1629 		pvh = pa_to_pvh(pa);
1630 		pv = &pvh->pvh_first;
1631 		if (pv->pv_pmap != pmap_kernel() ||
1632 		    !(pvh->pvh_attrs & PVH_PTPAGE))
1633 			continue;
1634 		do {
1635 			if (pv->pv_ptste && pv->pv_ptpmap == pmap_kernel())
1636 				break;
1637 		} while ((pv = pv->pv_next));
1638 		if (pv == NULL)
1639 			continue;
1640 #ifdef DEBUG
1641 		if (pv->pv_va < (vaddr_t)Sysmap ||
1642 		    pv->pv_va >= (vaddr_t)Sysmap + M68K_MAX_PTSIZE) {
1643 			printf("collect: kernel PT VA out of range\n");
1644 			pmap_pvdump(pa);
1645 			continue;
1646 		}
1647 #endif
1648 		pte = (pt_entry_t *)(pv->pv_va + PAGE_SIZE);
1649 		while (--pte >= (pt_entry_t *)pv->pv_va && *pte == PG_NV)
1650 			;
1651 		if (pte >= (pt_entry_t *)pv->pv_va)
1652 			continue;
1653 
1654 #ifdef DEBUG
1655 		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
1656 			printf("collect: freeing KPT page at %lx (ste %x@%p)\n",
1657 			    pv->pv_va, *pv->pv_ptste, pv->pv_ptste);
1658 			opmapdebug = pmapdebug;
1659 			pmapdebug |= PDB_PTPAGE;
1660 		}
1661 
1662 		ste = pv->pv_ptste;
1663 #endif
1664 		/*
1665 		 * If all entries were invalid we can remove the page.
1666 		 * We call pmap_remove_entry to take care of invalidating
1667 		 * ST and Sysptmap entries.
1668 		 */
1669 
1670 		if (!pmap_extract(pmap, pv->pv_va, &kpa)) {
1671 			printf("collect: freeing KPT page at %lx (ste %x@%p)\n",
1672 			    pv->pv_va, *pv->pv_ptste, pv->pv_ptste);
1673 			panic("pmap_collect: mapping not found");
1674 		}
1675 		pmap_remove_mapping(pmap, pv->pv_va, NULL,
1676 		    PRM_TFLUSH|PRM_CFLUSH, NULL);
1677 
1678 		/*
1679 		 * Use the physical address to locate the original
1680 		 * (kmem_alloc assigned) address for the page and put
1681 		 * that page back on the free list.
1682 		 */
1683 
1684 		for (pkpt = &kpt_used_list, kpt = *pkpt;
1685 		     kpt != NULL;
1686 		     pkpt = &kpt->kpt_next, kpt = *pkpt)
1687 			if (kpt->kpt_pa == kpa)
1688 				break;
1689 #ifdef DEBUG
1690 		if (kpt == NULL)
1691 			panic("pmap_collect: lost a KPT page");
1692 		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
1693 			printf("collect: %lx (%lx) to free list\n",
1694 			    kpt->kpt_va, kpa);
1695 #endif
1696 		*pkpt = kpt->kpt_next;
1697 		kpt->kpt_next = kpt_free_list;
1698 		kpt_free_list = kpt;
1699 #ifdef DEBUG
1700 		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
1701 			pmapdebug = opmapdebug;
1702 
1703 		if (*ste != SG_NV)
1704 			printf("collect: kernel STE at %p still valid (%x)\n",
1705 			    ste, *ste);
1706 		ste = &Sysptmap[ste - pmap_ste(pmap_kernel(), 0)];
1707 		if (*ste != SG_NV)
1708 			printf("collect: kernel PTmap at %p still valid (%x)\n",
1709 			    ste, *ste);
1710 #endif
1711 	}
1712 }
1713 
1714 /*
1715  * pmap_collect:
1716  *
1717  *	Helper for pmap_enter_ptpage().
1718  *
1719  *	Garbage collects the physical map system for pages which are no
1720  *	longer used.  Success need not be guaranteed -- that is, there
1721  *	may well be pages which are not referenced, but others may be
1722  *	collected.
1723  */
1724 static void
1725 pmap_collect(void)
1726 {
1727 	int s;
1728 	uvm_physseg_t bank;
1729 
1730 	/*
1731 	 * XXX This is very bogus.  We should handle kernel PT
1732 	 * XXX pages much differently.
1733 	 */
1734 
1735 	s = splvm();
1736 	for (bank = uvm_physseg_get_first();
1737 	     uvm_physseg_valid_p(bank);
1738 	     bank = uvm_physseg_get_next(bank)) {
1739 		pmap_collect1(pmap_kernel(), ptoa(uvm_physseg_get_start(bank)),
1740 		    ptoa(uvm_physseg_get_end(bank)));
1741 	}
1742 	splx(s);
1743 }
1744 
1745 /*
1746  * pmap_zero_page:		[ INTERFACE ]
1747  *
1748  *	Zero the specified (machine independent) page by mapping the page
1749  *	into virtual memory and using memset to clear its contents, one
1750  *	machine dependent page at a time.
1751  *
1752  *	Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
1753  *	      (Actually, we go to splvm(), and since we don't
1754  *	      support multiple processors, this is sufficient.)
1755  */
1756 void
1757 pmap_zero_page(paddr_t phys)
1758 {
1759 	int npte;
1760 
1761 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_zero_page(%lx)\n", phys));
1762 
1763 	npte = phys | PG_V;
1764 #ifdef CACHE_HAVE_VAC
1765 	if (pmap_aliasmask) {
1766 
1767 		/*
1768 		 * Cache-inhibit the mapping on VAC machines, as we would
1769 		 * be wasting the cache load.
1770 		 */
1771 
1772 		npte |= PG_CI;
1773 	}
1774 #endif
1775 
1776 #if defined(M68040) || defined(M68060)
1777 #if defined(M68020) || defined(M68030)
1778 	if (mmutype == MMU_68040)
1779 #endif
1780 	{
1781 		/*
1782 		 * Set copyback caching on the page; this is required
1783 		 * for cache consistency (since regular mappings are
1784 		 * copyback as well).
1785 		 */
1786 
1787 		npte |= PG_CCB;
1788 	}
1789 #endif
1790 
1791 	*caddr1_pte = npte;
1792 	TBIS((vaddr_t)CADDR1);
1793 
1794 	zeropage(CADDR1);
1795 
1796 #ifdef DEBUG
1797 	*caddr1_pte = PG_NV;
1798 	TBIS((vaddr_t)CADDR1);
1799 #endif
1800 }
1801 
1802 /*
1803  * pmap_copy_page:		[ INTERFACE ]
1804  *
1805  *	Copy the specified (machine independent) page by mapping the page
1806  *	into virtual memory and using memcpy to copy the page, one machine
1807  *	dependent page at a time.
1808  *
1809  *	Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
1810  *	      (Actually, we go to splvm(), and since we don't
1811  *	      support multiple processors, this is sufficient.)
1812  */
1813 void
1814 pmap_copy_page(paddr_t src, paddr_t dst)
1815 {
1816 	int npte1, npte2;
1817 
1818 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_copy_page(%lx, %lx)\n", src, dst));
1819 
1820 	npte1 = src | PG_RO | PG_V;
1821 	npte2 = dst | PG_V;
1822 #ifdef CACHE_HAVE_VAC
1823 	if (pmap_aliasmask) {
1824 
1825 		/*
1826 		 * Cache-inhibit the mapping on VAC machines, as we would
1827 		 * be wasting the cache load.
1828 		 */
1829 
1830 		npte1 |= PG_CI;
1831 		npte2 |= PG_CI;
1832 	}
1833 #endif
1834 
1835 #if defined(M68040) || defined(M68060)
1836 #if defined(M68020) || defined(M68030)
1837 	if (mmutype == MMU_68040)
1838 #endif
1839 	{
1840 		/*
1841 		 * Set copyback caching on the pages; this is required
1842 		 * for cache consistency (since regular mappings are
1843 		 * copyback as well).
1844 		 */
1845 
1846 		npte1 |= PG_CCB;
1847 		npte2 |= PG_CCB;
1848 	}
1849 #endif
1850 
1851 	*caddr1_pte = npte1;
1852 	TBIS((vaddr_t)CADDR1);
1853 
1854 	*caddr2_pte = npte2;
1855 	TBIS((vaddr_t)CADDR2);
1856 
1857 	copypage(CADDR1, CADDR2);
1858 
1859 #ifdef DEBUG
1860 	*caddr1_pte = PG_NV;
1861 	TBIS((vaddr_t)CADDR1);
1862 
1863 	*caddr2_pte = PG_NV;
1864 	TBIS((vaddr_t)CADDR2);
1865 #endif
1866 }
1867 
1868 /*
1869  * pmap_clear_modify:		[ INTERFACE ]
1870  *
1871  *	Clear the modify bits on the specified physical page.
1872  */
1873 bool
1874 pmap_clear_modify(struct vm_page *pg)
1875 {
1876 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
1877 
1878 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_modify(%p)\n", pg));
1879 
1880 	return pmap_changebit(pa, 0, ~PG_M);
1881 }
1882 
1883 /*
1884  * pmap_clear_reference:	[ INTERFACE ]
1885  *
1886  *	Clear the reference bit on the specified physical page.
1887  */
1888 bool
1889 pmap_clear_reference(struct vm_page *pg)
1890 {
1891 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
1892 
1893 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_reference(%p)\n", pg));
1894 
1895 	return pmap_changebit(pa, 0, ~PG_U);
1896 }
1897 
1898 /*
1899  * pmap_is_referenced:		[ INTERFACE ]
1900  *
1901  *	Return whether or not the specified physical page is referenced
1902  *	by any physical maps.
1903  */
1904 bool
1905 pmap_is_referenced(struct vm_page *pg)
1906 {
1907 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
1908 
1909 	return pmap_testbit(pa, PG_U);
1910 }
1911 
1912 /*
1913  * pmap_is_modified:		[ INTERFACE ]
1914  *
1915  *	Return whether or not the specified physical page is modified
1916  *	by any physical maps.
1917  */
1918 bool
1919 pmap_is_modified(struct vm_page *pg)
1920 {
1921 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
1922 
1923 	return pmap_testbit(pa, PG_M);
1924 }
1925 
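/*
 * Illustrative sketch (not part of the original source, excluded from the
 * build): a typical consumer pattern for the reference/modify interface
 * above, e.g. a page-queue scan that records the page's history and then
 * resets it.  The helper name "example_query_page" is hypothetical.
 */
#if 0
static void
example_query_page(struct vm_page *pg)
{

	if (pmap_is_referenced(pg))
		printf("page %p has been referenced\n", pg);
	if (pmap_is_modified(pg))
		printf("page %p is dirty and would need cleaning\n", pg);

	/* Reset the history for the next scan. */
	(void)pmap_clear_reference(pg);
	(void)pmap_clear_modify(pg);
}
#endif /* 0 */
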
1926 /*
1927  * pmap_phys_address:		[ INTERFACE ]
1928  *
1929  *	Return the physical address corresponding to the specified
1930  *	cookie.  Used by the device pager to decode a device driver's
1931  *	mmap entry point return value.
1932  *
1933  *	Note: no locking is necessary in this function.
1934  */
1935 paddr_t
1936 pmap_phys_address(paddr_t ppn)
1937 {
1938 	return m68k_ptob(ppn);
1939 }
1940 
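/*
 * Illustrative sketch (not part of the original source, excluded from the
 * build): a device driver's mmap entry point typically returns the page
 * frame number of its register block, which the device pager later hands
 * back to pmap_phys_address() above.  The register base address, the
 * helper name, and the use of m68k_btop() as the inverse of m68k_ptob()
 * are assumptions for illustration.
 */
#if 0
static paddr_t
example_mmap(dev_t dev, off_t off, int prot)
{
	paddr_t regbase = 0x00d00000;	/* hypothetical device registers */

	/* Return a cookie that pmap_phys_address() can decode. */
	return m68k_btop(regbase + off);
}
#endif /* 0 */
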
1941 #ifdef CACHE_HAVE_VAC
1942 /*
1943  * pmap_prefer:			[ INTERFACE ]
1944  *
1945  *	Find the first virtual address >= *vap that does not
1946  *	cause a virtually-addressed cache alias problem.
1947  */
1948 void
1949 pmap_prefer(vaddr_t foff, vaddr_t *vap)
1950 {
1951 	vaddr_t va;
1952 	vsize_t d;
1953 
1954 #ifdef M68K_MMU_MOTOROLA
1955 	if (pmap_aliasmask)
1956 #endif
1957 	{
1958 		va = *vap;
1959 		d = foff - va;
1960 		d &= pmap_aliasmask;
1961 		*vap = va + d;
1962 	}
1963 }
1964 #endif /* CACHE_HAVE_VAC */
1965 
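/*
 * Worked example for the alias-avoidance arithmetic in pmap_prefer()
 * above (illustrative only; the 0x3fff mask value is hypothetical):
 * with pmap_aliasmask = 0x3fff, foff = 0x2a000 and *vap = 0x10800,
 * d = (0x2a000 - 0x10800) & 0x3fff = 0x1800, so *vap becomes 0x12000.
 * 0x12000 and 0x2a000 agree in their low 14 bits, so both addresses
 * index the same VAC line and no cache alias is created, and the result
 * is the first such address >= the original *vap.
 */
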
1966 /*
1967  * Miscellaneous support routines follow
1968  */
1969 
1970 /*
1971  * pmap_remove_mapping:
1972  *
1973  *	Invalidate a single page denoted by pmap/va.
1974  *
1975  *	If (pte != NULL), it is the already computed PTE for the page.
1976  *
1977  *	If (flags & PRM_TFLUSH), we must invalidate any TLB information.
1978  *
1979  *	If (flags & PRM_CFLUSH), we must flush/invalidate any cache
1980  *	information.
1981  *
1982  *	If (flags & PRM_KEEPPTPAGE), we don't free the page table page
1983  *	if the reference drops to zero.
1984  */
1985 /* static */
1986 void
1987 pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte, int flags,
1988     struct pv_entry **opvp)
1989 {
1990 	paddr_t pa;
1991 	struct pv_header *pvh;
1992 	struct pv_entry *pv, *npv, *opv = NULL;
1993 	struct pmap *ptpmap;
1994 	st_entry_t *ste;
1995 	int s, bits;
1996 #ifdef DEBUG
1997 	pt_entry_t opte;
1998 #endif
1999 
2000 	PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
2001 	    ("pmap_remove_mapping(%p, %lx, %p, %x, %p)\n",
2002 	    pmap, va, pte, flags, opvp));
2003 
2004 	/*
2005 	 * PTE not provided, compute it from pmap and va.
2006 	 */
2007 
2008 	if (pte == NULL) {
2009 		pte = pmap_pte(pmap, va);
2010 		if (*pte == PG_NV)
2011 			return;
2012 	}
2013 
2014 #ifdef CACHE_HAVE_VAC
2015 	if (pmap_aliasmask && (flags & PRM_CFLUSH)) {
2016 
2017 		/*
2018 		 * Purge kernel side of VAC to ensure we get the correct
2019 		 * state of any hardware maintained bits.
2020 		 */
2021 
2022 		DCIS();
2023 
2024 		/*
2025 		 * If this is a non-CI user mapping for the current process,
2026 		 * flush the VAC.  Note that the kernel side was flushed
2027 		 * above so we don't worry about non-CI kernel mappings.
2028 		 */
2029 
2030 		if (active_user_pmap(pmap) && !pmap_pte_ci(pte)) {
2031 			DCIU();
2032 		}
2033 	}
2034 #endif
2035 
2036 	pa = pmap_pte_pa(pte);
2037 #ifdef DEBUG
2038 	opte = *pte;
2039 #endif
2040 
2041 	/*
2042 	 * Update statistics
2043 	 */
2044 
2045 	if (pmap_pte_w(pte))
2046 		pmap->pm_stats.wired_count--;
2047 	pmap->pm_stats.resident_count--;
2048 
2049 #if defined(M68040) || defined(M68060)
2050 #if defined(M68020) || defined(M68030)
2051 	if (mmutype == MMU_68040)
2052 #endif
2053 	if ((flags & PRM_CFLUSH)) {
2054 		DCFP(pa);
2055 		ICPP(pa);
2056 	}
2057 #endif
2058 
2059 	/*
2060 	 * Invalidate the PTE after saving the reference/modify info.
2061 	 */
2062 
2063 	PMAP_DPRINTF(PDB_REMOVE, ("remove: invalidating pte at %p\n", pte));
2064 	bits = *pte & (PG_U|PG_M);
2065 	*pte = PG_NV;
2066 	if ((flags & PRM_TFLUSH) && active_pmap(pmap))
2067 		TBIS(va);
2068 
2069 	/*
2070 	 * For user mappings decrement the wiring count on
2071 	 * the PT page.
2072 	 */
2073 
2074 	if (pmap != pmap_kernel()) {
2075 		vaddr_t ptpva = trunc_page((vaddr_t)pte);
2076 		int refs = pmap_ptpage_delref(ptpva);
2077 #ifdef DEBUG
2078 		if (pmapdebug & PDB_WIRING)
2079 			pmap_check_wiring("remove", ptpva);
2080 #endif
2081 
2082 		/*
2083 		 * If reference count drops to 0, and we're not instructed
2084 		 * to keep it around, free the PT page.
2085 		 */
2086 
2087 		if (refs == 0 && (flags & PRM_KEEPPTPAGE) == 0) {
2088 #ifdef DIAGNOSTIC
2089 			struct pv_header *ptppvh;
2090 			struct pv_entry *ptppv;
2091 #endif
2092 			paddr_t ptppa;
2093 
2094 			ptppa = pmap_pte_pa(pmap_pte(pmap_kernel(), ptpva));
2095 #ifdef DIAGNOSTIC
2096 			if (PAGE_IS_MANAGED(ptppa) == 0)
2097 				panic("pmap_remove_mapping: unmanaged PT page");
2098 			ptppvh = pa_to_pvh(ptppa);
2099 			ptppv = &ptppvh->pvh_first;
2100 			if (ptppv->pv_ptste == NULL)
2101 				panic("pmap_remove_mapping: ptste == NULL");
2102 			if (ptppv->pv_pmap != pmap_kernel() ||
2103 			    ptppv->pv_va != ptpva ||
2104 			    ptppv->pv_next != NULL)
2105 				panic("pmap_remove_mapping: "
2106 				    "bad PT page pmap %p, va 0x%lx, next %p",
2107 				    ptppv->pv_pmap, ptppv->pv_va,
2108 				    ptppv->pv_next);
2109 #endif
2110 			pmap_remove_mapping(pmap_kernel(), ptpva,
2111 			    NULL, PRM_TFLUSH|PRM_CFLUSH, NULL);
2112 			rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
2113 			uvm_pagefree(PHYS_TO_VM_PAGE(ptppa));
2114 			rw_exit(uvm_kernel_object->vmobjlock);
2115 			PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
2116 			    ("remove: PT page 0x%lx (0x%lx) freed\n",
2117 			    ptpva, ptppa));
2118 		}
2119 	}
2120 
2121 	/*
2122 	 * If this isn't a managed page, we are all done.
2123 	 */
2124 
2125 	if (PAGE_IS_MANAGED(pa) == 0)
2126 		return;
2127 
2128 	/*
2129 	 * Otherwise remove it from the PV table
2130 	 * (raise IPL since we may be called at interrupt time).
2131 	 */
2132 
2133 	pvh = pa_to_pvh(pa);
2134 	pv = &pvh->pvh_first;
2135 	ste = NULL;
2136 	s = splvm();
2137 
2138 	/*
2139 	 * If it is the first entry on the list, it is actually
2140 	 * in the header and we must copy the following entry up
2141 	 * to the header.  Otherwise we must search the list for
2142 	 * the entry.  In either case we free the now unused entry.
2143 	 */
2144 
2145 	if (pmap == pv->pv_pmap && va == pv->pv_va) {
2146 		ste = pv->pv_ptste;
2147 		ptpmap = pv->pv_ptpmap;
2148 		npv = pv->pv_next;
2149 		if (npv) {
2150 			*pv = *npv;
2151 			opv = npv;
2152 		} else
2153 			pv->pv_pmap = NULL;
2154 	} else {
2155 		for (npv = pv->pv_next; npv; npv = npv->pv_next) {
2156 			if (pmap == npv->pv_pmap && va == npv->pv_va)
2157 				break;
2158 			pv = npv;
2159 		}
2160 #ifdef DEBUG
2161 		if (npv == NULL)
2162 			panic("pmap_remove: PA not in pv_tab");
2163 #endif
2164 		ste = npv->pv_ptste;
2165 		ptpmap = npv->pv_ptpmap;
2166 		pv->pv_next = npv->pv_next;
2167 		opv = npv;
2168 		pvh = pa_to_pvh(pa);
2169 		pv = &pvh->pvh_first;
2170 	}
2171 
2172 #ifdef CACHE_HAVE_VAC
2173 
2174 	/*
2175 	 * If only one mapping is left, we no longer need to cache-inhibit it.
2176 	 */
2177 
2178 	if (pmap_aliasmask &&
2179 	    pv->pv_pmap && pv->pv_next == NULL && (pvh->pvh_attrs & PVH_CI)) {
2180 		PMAP_DPRINTF(PDB_CACHE,
2181 		    ("remove: clearing CI for pa %lx\n", pa));
2182 		pvh->pvh_attrs &= ~PVH_CI;
2183 		pmap_changebit(pa, 0, ~PG_CI);
2184 #ifdef DEBUG
2185 		if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
2186 		    (PDB_CACHE|PDB_PVDUMP))
2187 			pmap_pvdump(pa);
2188 #endif
2189 	}
2190 #endif
2191 
2192 	/*
2193 	 * If this was a PT page we must also remove the
2194 	 * mapping from the associated segment table.
2195 	 */
2196 
2197 	if (ste) {
2198 		PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
2199 		    ("remove: ste was %x@%p pte was %x@%p\n",
2200 		    *ste, ste, opte, pmap_pte(pmap, va)));
2201 #if defined(M68040) || defined(M68060)
2202 #if defined(M68020) || defined(M68030)
2203 		if (mmutype == MMU_68040)
2204 #endif
2205 		{
2206 			st_entry_t *este = &ste[NPTEPG/SG4_LEV3SIZE];
2207 
2208 			while (ste < este)
2209 				*ste++ = SG_NV;
2210 #ifdef DEBUG
2211 			ste -= NPTEPG/SG4_LEV3SIZE;
2212 #endif
2213 		}
2214 #if defined(M68020) || defined(M68030)
2215 		else
2216 #endif
2217 #endif
2218 #if defined(M68020) || defined(M68030)
2219 		*ste = SG_NV;
2220 #endif
2221 
2222 		/*
2223 		 * If it was a user PT page, we decrement the
2224 		 * reference count on the segment table as well,
2225 		 * freeing it if it is now empty.
2226 		 */
2227 
2228 		if (ptpmap != pmap_kernel()) {
2229 			PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
2230 			    ("remove: stab %p, refcnt %d\n",
2231 			    ptpmap->pm_stab, ptpmap->pm_sref - 1));
2232 #ifdef DEBUG
2233 			if ((pmapdebug & PDB_PARANOIA) &&
2234 			    ptpmap->pm_stab !=
2235 			     (st_entry_t *)trunc_page((vaddr_t)ste))
2236 				panic("remove: bogus ste");
2237 #endif
2238 			if (--(ptpmap->pm_sref) == 0) {
2239 				PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
2240 				    ("remove: free stab %p\n",
2241 				    ptpmap->pm_stab));
2242 				uvm_km_free(st_map, (vaddr_t)ptpmap->pm_stab,
2243 				    M68K_STSIZE, UVM_KMF_WIRED);
2244 				ptpmap->pm_stab = Segtabzero;
2245 				ptpmap->pm_stpa = Segtabzeropa;
2246 #if defined(M68040) || defined(M68060)
2247 #if defined(M68020) || defined(M68030)
2248 				if (mmutype == MMU_68040)
2249 #endif
2250 					ptpmap->pm_stfree = protostfree;
2251 #endif
2252 
2253 				/*
2254 				 * XXX may have changed segment table
2255 				 * pointer for current process so
2256 				 * update now to reload hardware.
2257 				 */
2258 
2259 				if (active_user_pmap(ptpmap))
2260 					PMAP_ACTIVATE(ptpmap, 1);
2261 			}
2262 		}
2263 		pvh->pvh_attrs &= ~PVH_PTPAGE;
2264 		ptpmap->pm_ptpages--;
2265 	}
2266 
2267 	/*
2268 	 * Update saved attributes for managed page
2269 	 */
2270 
2271 	pvh->pvh_attrs |= bits;
2272 	splx(s);
2273 
2274 	if (opvp != NULL)
2275 		*opvp = opv;
2276 	else if (opv != NULL)
2277 		pmap_free_pv(opv);
2278 }
2279 
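/*
 * Illustrative sketch (not part of the original source, excluded from the
 * build): removing a single mapping with both TLB and cache maintenance,
 * letting pmap_remove_mapping() look up the PTE itself (pte == NULL) and
 * free any released pv_entry (opvp == NULL).  This mirrors how the
 * routine is invoked for PT pages above; the helper name is hypothetical.
 */
#if 0
static void
example_remove_one(pmap_t pmap, vaddr_t va)
{

	pmap_remove_mapping(pmap, va, NULL, PRM_TFLUSH|PRM_CFLUSH, NULL);
}
#endif /* 0 */
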
2280 /*
2281  * pmap_testbit:
2282  *
2283  *	Test the modified/referenced bits of a physical page.
2284  */
2285 /* static */
2286 bool
2287 pmap_testbit(paddr_t pa, int bit)
2288 {
2289 	struct pv_header *pvh;
2290 	struct pv_entry *pv;
2291 	pt_entry_t *pte;
2292 	int s;
2293 
2294 	pvh = pa_to_pvh(pa);
2295 	pv = &pvh->pvh_first;
2296 	s = splvm();
2297 
2298 	/*
2299 	 * Check saved info first
2300 	 */
2301 
2302 	if (pvh->pvh_attrs & bit) {
2303 		splx(s);
2304 		return true;
2305 	}
2306 
2307 #ifdef CACHE_HAVE_VAC
2308 
2309 	/*
2310 	 * Flush VAC to get correct state of any hardware maintained bits.
2311 	 */
2312 
2313 	if (pmap_aliasmask && (bit & (PG_U|PG_M)))
2314 		DCIS();
2315 #endif
2316 
2317 	/*
2318 	 * Not found.  Check current mappings, returning immediately if
2319 	 * found.  Cache a hit to speed future lookups.
2320 	 */
2321 
2322 	if (pv->pv_pmap != NULL) {
2323 		for (; pv; pv = pv->pv_next) {
2324 			pte = pmap_pte(pv->pv_pmap, pv->pv_va);
2325 			if (*pte & bit) {
2326 				pvh->pvh_attrs |= bit;
2327 				splx(s);
2328 				return true;
2329 			}
2330 		}
2331 	}
2332 	splx(s);
2333 	return false;
2334 }
2335 
2336 /*
2337  * pmap_changebit:
2338  *
2339  *	Change the modified/referenced bits, or other PTE bits,
2340  *	for a physical page.
2341  */
2342 /* static */
2343 bool
2344 pmap_changebit(paddr_t pa, int set, int mask)
2345 {
2346 	struct pv_header *pvh;
2347 	struct pv_entry *pv;
2348 	pt_entry_t *pte, npte;
2349 	vaddr_t va;
2350 	int s;
2351 #if defined(CACHE_HAVE_VAC) || defined(M68040) || defined(M68060)
2352 	bool firstpage = true;
2353 #endif
2354 	bool r;
2355 
2356 	PMAP_DPRINTF(PDB_BITS,
2357 	    ("pmap_changebit(%lx, %x, %x)\n", pa, set, mask));
2358 
2359 	pvh = pa_to_pvh(pa);
2360 	pv = &pvh->pvh_first;
2361 	s = splvm();
2362 
2363 	/*
2364 	 * Clear saved attributes (modify, reference)
2365 	 */
2366 
2367 	r = (pvh->pvh_attrs & ~mask) != 0;
2368 	pvh->pvh_attrs &= mask;
2369 
2370 	/*
2371 	 * Loop over all current mappings, setting/clearing as appropriate.
2372 	 * (If setting RO, do we need to clear the VAC?)
2373 	 */
2374 
2375 	if (pv->pv_pmap != NULL) {
2376 #ifdef DEBUG
2377 		int toflush = 0;
2378 #endif
2379 		for (; pv; pv = pv->pv_next) {
2380 #ifdef DEBUG
2381 			toflush |= (pv->pv_pmap == pmap_kernel()) ? 2 : 1;
2382 #endif
2383 			va = pv->pv_va;
2384 			pte = pmap_pte(pv->pv_pmap, va);
2385 #ifdef CACHE_HAVE_VAC
2386 
2387 			/*
2388 			 * Flush VAC to ensure we get correct state of HW bits
2389 			 * so we don't clobber them.
2390 			 */
2391 
2392 			if (firstpage && pmap_aliasmask) {
2393 				firstpage = false;
2394 				DCIS();
2395 			}
2396 #endif
2397 			npte = (*pte | set) & mask;
2398 			if (*pte != npte) {
2399 				r = true;
2400 #if defined(M68040) || defined(M68060)
2401 				/*
2402 				 * If we are changing caching status or
2403 				 * protection make sure the caches are
2404 				 * flushed (but only once).
2405 				 */
2406 				if (firstpage &&
2407 #if defined(M68020) || defined(M68030)
2408 				    (mmutype == MMU_68040) &&
2409 #endif
2410 				    ((set == PG_RO) ||
2411 				     (set & PG_CMASK) ||
2412 				     (mask & PG_CMASK) == 0)) {
2413 					firstpage = false;
2414 					DCFP(pa);
2415 					ICPP(pa);
2416 				}
2417 #endif
2418 				*pte = npte;
2419 				if (active_pmap(pv->pv_pmap))
2420 					TBIS(va);
2421 			}
2422 		}
2423 	}
2424 	splx(s);
2425 	return r;
2426 }
2427 
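/*
 * Illustrative sketch (not part of the original source, excluded from the
 * build): the set/mask convention of pmap_changebit().  Each PTE becomes
 * (*pte | set) & mask, so clearing the modify bit is (0, ~PG_M) as in
 * pmap_clear_modify() above, and write-protecting every mapping of a
 * managed page could be expressed as (PG_RO, ~0).  The helper name is
 * hypothetical.
 */
#if 0
static void
example_write_protect(paddr_t pa)
{

	/* Turn on PG_RO in every mapping of pa; keep all other bits. */
	(void)pmap_changebit(pa, PG_RO, ~0);
}
#endif /* 0 */
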
2428 /*
2429  * pmap_enter_ptpage:
2430  *
2431  *	Allocate and map a PT page for the specified pmap/va pair.
2432  */
2433 /* static */
2434 int
2435 pmap_enter_ptpage(pmap_t pmap, vaddr_t va, bool can_fail)
2436 {
2437 	paddr_t ptpa;
2438 	struct vm_page *pg;
2439 	struct pv_header *pvh;
2440 	struct pv_entry *pv;
2441 	st_entry_t *ste;
2442 	int s;
2443 
2444 	PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE,
2445 	    ("pmap_enter_ptpage: pmap %p, va %lx\n", pmap, va));
2446 
2447 	/*
2448 	 * Allocate a segment table if necessary.  Note that it is allocated
2449 	 * from a private map and not pt_map.  This keeps user page tables
2450 	 * aligned on segment boundaries in the kernel address space.
2451 	 * The segment table is wired down.  It will be freed whenever the
2452 	 * reference count drops to zero.
2453 	 */
2454 	if (pmap->pm_stab == Segtabzero) {
2455 		pmap->pm_stab = (st_entry_t *)
2456 		    uvm_km_alloc(st_map, M68K_STSIZE, 0,
2457 		    UVM_KMF_WIRED | UVM_KMF_ZERO |
2458 		    (can_fail ? UVM_KMF_NOWAIT : 0));
2459 		if (pmap->pm_stab == NULL) {
2460 			pmap->pm_stab = Segtabzero;
2461 			return ENOMEM;
2462 		}
2463 		(void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_stab,
2464 		    (paddr_t *)&pmap->pm_stpa);
2465 #if defined(M68040) || defined(M68060)
2466 #if defined(M68020) || defined(M68030)
2467 		if (mmutype == MMU_68040)
2468 #endif
2469 		{
2470 			pt_entry_t	*pte;
2471 
2472 			pte = pmap_pte(pmap_kernel(), pmap->pm_stab);
2473 			*pte = (*pte & ~PG_CMASK) | PG_CI;
2474 			pmap->pm_stfree = protostfree;
2475 		}
2476 #endif
2477 		/*
2478 		 * XXX may have changed segment table pointer for current
2479 		 * process so update now to reload hardware.
2480 		 */
2481 		if (active_user_pmap(pmap))
2482 			PMAP_ACTIVATE(pmap, 1);
2483 
2484 		PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
2485 		    ("enter: pmap %p stab %p(%p)\n",
2486 		    pmap, pmap->pm_stab, pmap->pm_stpa));
2487 	}
2488 
2489 	ste = pmap_ste(pmap, va);
2490 #if defined(M68040) || defined(M68060)
2491 	/*
2492 	 * Allocate level 2 descriptor block if necessary
2493 	 */
2494 #if defined(M68020) || defined(M68030)
2495 	if (mmutype == MMU_68040)
2496 #endif
2497 	{
2498 		if (*ste == SG_NV) {
2499 			int ix;
2500 			void *addr;
2501 
2502 			ix = bmtol2(pmap->pm_stfree);
2503 			if (ix == -1)
2504 				panic("enter: out of address space"); /* XXX */
2505 			pmap->pm_stfree &= ~l2tobm(ix);
2506 			addr = (void *)&pmap->pm_stab[ix*SG4_LEV2SIZE];
2507 			memset(addr, 0, SG4_LEV2SIZE*sizeof(st_entry_t));
2508 			addr = (void *)&pmap->pm_stpa[ix*SG4_LEV2SIZE];
2509 			*ste = (u_int)addr | SG_RW | SG_U | SG_V;
2510 
2511 			PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
2512 			    ("enter: alloc ste2 %d(%p)\n", ix, addr));
2513 		}
2514 		ste = pmap_ste2(pmap, va);
2515 		/*
2516 		 * Since a level 2 descriptor maps a block of SG4_LEV3SIZE
2517 		 * level 3 descriptors, we need a chunk of NPTEPG/SG4_LEV3SIZE
2518 		 * (16) such descriptors (PAGE_SIZE/SG4_LEV3SIZE bytes) to map a
2519 		 * PT page--the unit of allocation.  We set `ste' to point
2520 		 * to the first entry of that chunk which is validated in its
2521 		 * entirety below.
2522 		 */
2523 		ste = (st_entry_t *)((int)ste & ~(PAGE_SIZE/SG4_LEV3SIZE-1));
2524 
2525 		PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
2526 		    ("enter: ste2 %p (%p)\n", pmap_ste2(pmap, va), ste));
2527 	}
2528 #endif
2529 	va = trunc_page((vaddr_t)pmap_pte(pmap, va));
2530 
2531 	/*
2532 	 * In the kernel we allocate a page from the kernel PT page
2533 	 * free list and map it into the kernel page table map (via
2534 	 * pmap_enter).
2535 	 */
2536 	if (pmap == pmap_kernel()) {
2537 		struct kpt_page *kpt;
2538 
2539 		s = splvm();
2540 		if ((kpt = kpt_free_list) == NULL) {
2541 			/*
2542 			 * No PT pages available.
2543 			 * Try once to free up unused ones.
2544 			 */
2545 			PMAP_DPRINTF(PDB_COLLECT,
2546 			    ("enter: no KPT pages, collecting...\n"));
2547 			pmap_collect();
2548 			if ((kpt = kpt_free_list) == NULL)
2549 				panic("pmap_enter_ptpage: can't get KPT page");
2550 		}
2551 		kpt_free_list = kpt->kpt_next;
2552 		kpt->kpt_next = kpt_used_list;
2553 		kpt_used_list = kpt;
2554 		ptpa = kpt->kpt_pa;
2555 		memset((void *)kpt->kpt_va, 0, PAGE_SIZE);
2556 		pmap_enter(pmap, va, ptpa, VM_PROT_READ | VM_PROT_WRITE,
2557 		    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
2558 		pmap_update(pmap);
2559 #ifdef DEBUG
2560 		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE)) {
2561 			int ix = pmap_ste(pmap, va) - pmap_ste(pmap, 0);
2562 
2563 			printf("enter: add &Sysptmap[%d]: %x (KPT page %lx)\n",
2564 			    ix, Sysptmap[ix], kpt->kpt_va);
2565 		}
2566 #endif
2567 		splx(s);
2568 	} else {
2569 
2570 		/*
2571 		 * For user processes we just allocate a page from the
2572 		 * VM system.  Note that we set the page "wired" count to 1,
2573 		 * which is what we use to check if the page can be freed.
2574 		 * See pmap_remove_mapping().
2575 		 *
2576 		 * Count the segment table reference first so that we won't
2577 		 * lose the segment table when low on memory.
2578 		 */
2579 
2580 		pmap->pm_sref++;
2581 		PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
2582 		    ("enter: about to alloc UPT pg at %lx\n", va));
2583 		rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
2584 		while ((pg = uvm_pagealloc(uvm_kernel_object,
2585 					   va - vm_map_min(kernel_map),
2586 					   NULL, UVM_PGA_ZERO)) == NULL) {
2587 			rw_exit(uvm_kernel_object->vmobjlock);
2588 			if (can_fail) {
2589 				pmap->pm_sref--;
2590 				return ENOMEM;
2591 			}
2592 			uvm_wait("ptpage");
2593 			rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
2594 		}
2595 		rw_exit(uvm_kernel_object->vmobjlock);
2596 		pg->flags &= ~(PG_BUSY|PG_FAKE);
2597 		UVM_PAGE_OWN(pg, NULL);
2598 		ptpa = VM_PAGE_TO_PHYS(pg);
2599 		pmap_enter(pmap_kernel(), va, ptpa,
2600 		    VM_PROT_READ | VM_PROT_WRITE,
2601 		    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
2602 		pmap_update(pmap_kernel());
2603 	}
2604 #if defined(M68040) || defined(M68060)
2605 	/*
2606 	 * Turn off copyback caching of page table pages;
2607 	 * things could get ugly otherwise.
2608 	 */
2609 #if defined(M68020) || defined(M68030)
2610 	if (mmutype == MMU_68040)
2611 #endif
2612 	{
2613 #ifdef DEBUG
2614 		pt_entry_t *pte = pmap_pte(pmap_kernel(), va);
2615 		if ((pmapdebug & PDB_PARANOIA) && (*pte & PG_CCB) == 0)
2616 			printf("%s PT no CCB: kva=%lx ptpa=%lx pte@%p=%x\n",
2617 			    pmap == pmap_kernel() ? "Kernel" : "User",
2618 			    va, ptpa, pte, *pte);
2619 #endif
2620 		if (pmap_changebit(ptpa, PG_CI, ~PG_CCB))
2621 			DCIS();
2622 	}
2623 #endif
2624 	/*
2625 	 * Locate the PV entry in the kernel for this PT page and
2626 	 * record the STE address.  This is so that we can invalidate
2627 	 * the STE when we remove the mapping for the page.
2628 	 */
2629 	pvh = pa_to_pvh(ptpa);
2630 	s = splvm();
2631 	if (pvh) {
2632 		pv = &pvh->pvh_first;
2633 		pvh->pvh_attrs |= PVH_PTPAGE;
2634 		do {
2635 			if (pv->pv_pmap == pmap_kernel() && pv->pv_va == va)
2636 				break;
2637 		} while ((pv = pv->pv_next));
2638 	} else {
2639 		pv = NULL;
2640 	}
2641 #ifdef DEBUG
2642 	if (pv == NULL)
2643 		panic("pmap_enter_ptpage: PT page not entered");
2644 #endif
2645 	pv->pv_ptste = ste;
2646 	pv->pv_ptpmap = pmap;
2647 
2648 	PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
2649 	    ("enter: new PT page at PA %lx, ste at %p\n", ptpa, ste));
2650 
2651 	/*
2652 	 * Map the new PT page into the segment table.
2653 	 * Also increment the reference count on the segment table if this
2654 	 * was a user page table page.  Note that we don't use vm_map_pageable
2655 	 * to keep the count like we do for PT pages; this is mostly because
2656 	 * it would be difficult to identify ST pages in pmap_pageable to
2657 	 * release them.  We also avoid the overhead of vm_map_pageable.
2658 	 */
2659 #if defined(M68040) || defined(M68060)
2660 #if defined(M68020) || defined(M68030)
2661 	if (mmutype == MMU_68040)
2662 #endif
2663 	{
2664 		st_entry_t *este;
2665 
2666 		for (este = &ste[NPTEPG/SG4_LEV3SIZE]; ste < este; ste++) {
2667 			*ste = ptpa | SG_U | SG_RW | SG_V;
2668 			ptpa += SG4_LEV3SIZE * sizeof(st_entry_t);
2669 		}
2670 	}
2671 #if defined(M68020) || defined(M68030)
2672 	else
2673 		*ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
2674 #endif
2675 #else
2676 	*ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
2677 #endif
2678 	if (pmap != pmap_kernel()) {
2679 		PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
2680 		    ("enter: stab %p refcnt %d\n",
2681 		    pmap->pm_stab, pmap->pm_sref));
2682 	}
2683 	/*
2684 	 * Flush stale TLB info.
2685 	 */
2686 	if (pmap == pmap_kernel())
2687 		TBIAS();
2688 	else
2689 		TBIAU();
2690 	pmap->pm_ptpages++;
2691 	splx(s);
2692 
2693 	return 0;
2694 }
2695 
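/*
 * Illustrative sketch (not part of the original source, excluded from the
 * build): how a caller that tolerates allocation failure might use
 * pmap_enter_ptpage() before filling in a new user PTE.  The helper name
 * and the error-handling policy are hypothetical.
 */
#if 0
static int
example_prepare_pte(pmap_t pmap, vaddr_t va)
{
	int error;

	if (!pmap_ste_v(pmap, va)) {
		/* No PT page maps this range yet; try to add one. */
		error = pmap_enter_ptpage(pmap, va, true);
		if (error)
			return error;	/* ENOMEM when can_fail is true */
	}
	return 0;
}
#endif /* 0 */
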
2696 /*
2697  * pmap_ptpage_addref:
2698  *
2699  *	Add a reference to the specified PT page.
2700  */
2701 void
2702 pmap_ptpage_addref(vaddr_t ptpva)
2703 {
2704 	struct vm_page *pg;
2705 
2706 	rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
2707 	pg = uvm_pagelookup(uvm_kernel_object, ptpva - vm_map_min(kernel_map));
2708 	pg->wire_count++;
2709 	PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
2710 	    ("ptpage addref: pg %p now %d\n",
2711 	     pg, pg->wire_count));
2712 	rw_exit(uvm_kernel_object->vmobjlock);
2713 }
2714 
2715 /*
2716  * pmap_ptpage_delref:
2717  *
2718  *	Delete a reference to the specified PT page.
2719  */
2720 int
2721 pmap_ptpage_delref(vaddr_t ptpva)
2722 {
2723 	struct vm_page *pg;
2724 	int rv;
2725 
2726 	rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
2727 	pg = uvm_pagelookup(uvm_kernel_object, ptpva - vm_map_min(kernel_map));
2728 	rv = --pg->wire_count;
2729 	PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
2730 	    ("ptpage delref: pg %p now %d\n",
2731 	     pg, pg->wire_count));
2732 	rw_exit(uvm_kernel_object->vmobjlock);
2733 	return rv;
2734 }
2735 
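/*
 * Illustrative sketch (not part of the original source, excluded from the
 * build): the addref/delref pair brackets the lifetime of a single user
 * PTE within its PT page; when delref reports no wirings remain, the
 * caller (see pmap_remove_mapping() above) may free the PT page.  The
 * helper name is hypothetical.
 */
#if 0
static void
example_ptpage_refs(pt_entry_t *pte)
{
	vaddr_t ptpva = trunc_page((vaddr_t)pte);

	pmap_ptpage_addref(ptpva);	/* a new PTE went valid in the page */
	/* ... later, when that PTE is removed ... */
	if (pmap_ptpage_delref(ptpva) == 0) {
		/* Last mapping gone; the PT page itself may be reclaimed. */
	}
}
#endif /* 0 */
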
2736 /*
2737  *	Routine:        pmap_procwr
2738  *
2739  *	Function:
2740  *		Synchronize caches corresponding to [va, va + len) in p.
2741  */
2742 void
2743 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
2744 {
2745 
2746 	(void)cachectl1(0x80000004, va, len, p);
2747 }
2748 
2749 void
2750 _pmap_set_page_cacheable(pmap_t pmap, vaddr_t va)
2751 {
2752 
2753 	if (!pmap_ste_v(pmap, va))
2754 		return;
2755 
2756 #if defined(M68040) || defined(M68060)
2757 #if defined(M68020) || defined(M68030)
2758 	if (mmutype == MMU_68040) {
2759 #endif
2760 	if (pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CCB, ~PG_CI))
2761 		DCIS();
2762 
2763 #if defined(M68020) || defined(M68030)
2764 	} else
2765 		pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), 0, ~PG_CI);
2766 #endif
2767 #else
2768 	pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), 0, ~PG_CI);
2769 #endif
2770 }
2771 
2772 void
2773 _pmap_set_page_cacheinhibit(pmap_t pmap, vaddr_t va)
2774 {
2775 
2776 	if (!pmap_ste_v(pmap, va))
2777 		return;
2778 
2779 #if defined(M68040) || defined(M68060)
2780 #if defined(M68020) || defined(M68030)
2781 	if (mmutype == MMU_68040) {
2782 #endif
2783 	if (pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~PG_CCB))
2784 		DCIS();
2785 #if defined(M68020) || defined(M68030)
2786 	} else
2787 		pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~0);
2788 #endif
2789 #else
2790 	pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~0);
2791 #endif
2792 }
2793 
2794 int
2795 _pmap_page_is_cacheable(pmap_t pmap, vaddr_t va)
2796 {
2797 
2798 	if (!pmap_ste_v(pmap, va))
2799 		return 0;
2800 
2801 	return (pmap_pte_ci(pmap_pte(pmap, va)) == 0) ? 1 : 0;
2802 }
2803 
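/*
 * Illustrative sketch (not part of the original source, excluded from the
 * build): the cacheability helpers above are typically toggled in pairs
 * around accesses that must bypass the cache.  The helper name and the
 * surrounding policy are hypothetical.
 */
#if 0
static void
example_uncached_window(pmap_t pmap, vaddr_t va)
{

	_pmap_set_page_cacheinhibit(pmap, va);
	/* ... touch the page while it is cache-inhibited ... */
	_pmap_set_page_cacheable(pmap, va);
	KASSERT(_pmap_page_is_cacheable(pmap, va));
}
#endif /* 0 */
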
2804 #ifdef DEBUG
2805 /*
2806  * pmap_pvdump:
2807  *
2808  *	Dump the contents of the PV list for the specified physical page.
2809  */
2810 void
2811 pmap_pvdump(paddr_t pa)
2812 {
2813 	struct pv_header *pvh;
2814 	struct pv_entry *pv;
2815 
2816 	printf("pa %lx", pa);
2817 	pvh = pa_to_pvh(pa);
2818 	for (pv = &pvh->pvh_first; pv; pv = pv->pv_next)
2819 		printf(" -> pmap %p, va %lx, ptste %p, ptpmap %p",
2820 		    pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap);
2821 	printf("\n");
2822 }
2823 
2824 /*
2825  * pmap_check_wiring:
2826  *
2827  *	Count the number of valid mappings in the specified PT page,
2828  *	and ensure that it is consistent with the number of wirings
2829  *	to that page that the VM system has.
2830  */
2831 void
2832 pmap_check_wiring(const char *str, vaddr_t va)
2833 {
2834 	pt_entry_t *pte;
2835 	paddr_t pa;
2836 	struct vm_page *pg;
2837 	int count;
2838 
2839 	if (!pmap_ste_v(pmap_kernel(), va) ||
2840 	    !pmap_pte_v(pmap_pte(pmap_kernel(), va)))
2841 		return;
2842 
2843 	pa = pmap_pte_pa(pmap_pte(pmap_kernel(), va));
2844 	pg = PHYS_TO_VM_PAGE(pa);
2845 	if (pg->wire_count > PAGE_SIZE / sizeof(pt_entry_t)) {
2846 		panic("*%s*: 0x%lx: wire count %d", str, va, pg->wire_count);
2847 	}
2848 
2849 	count = 0;
2850 	for (pte = (pt_entry_t *)va; pte < (pt_entry_t *)(va + PAGE_SIZE);
2851 	     pte++)
2852 		if (*pte)
2853 			count++;
2854 	if (pg->wire_count != count)
2855 		panic("*%s*: 0x%lx: w%d/a%d",
2856 		       str, va, pg->wire_count, count);
2857 }
2858 #endif /* DEBUG */
2859