1 /*	$NetBSD: pmap_motorola.c,v 1.1 2002/11/05 07:41:25 chs Exp $        */
2 
3 /*-
4  * Copyright (c) 1999 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the NetBSD
21  *	Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * Copyright (c) 1991, 1993
41  *	The Regents of the University of California.  All rights reserved.
42  *
43  * This code is derived from software contributed to Berkeley by
44  * the Systems Programming Group of the University of Utah Computer
45  * Science Department.
46  *
47  * Redistribution and use in source and binary forms, with or without
48  * modification, are permitted provided that the following conditions
49  * are met:
50  * 1. Redistributions of source code must retain the above copyright
51  *    notice, this list of conditions and the following disclaimer.
52  * 2. Redistributions in binary form must reproduce the above copyright
53  *    notice, this list of conditions and the following disclaimer in the
54  *    documentation and/or other materials provided with the distribution.
55  * 3. All advertising materials mentioning features or use of this software
56  *    must display the following acknowledgement:
57  *	This product includes software developed by the University of
58  *	California, Berkeley and its contributors.
59  * 4. Neither the name of the University nor the names of its contributors
60  *    may be used to endorse or promote products derived from this software
61  *    without specific prior written permission.
62  *
63  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
64  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
65  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
66  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
67  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
68  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
69  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
70  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
71  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
72  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73  * SUCH DAMAGE.
74  *
75  *	@(#)pmap.c	8.6 (Berkeley) 5/27/94
76  */
77 
78 /*
79  * Motorola m68k-family physical map management code.
80  *
81  * Supports:
82  *	68020 with 68851 MMU
83  *	68030 with on-chip MMU
84  *	68040 with on-chip MMU
85  *	68060 with on-chip MMU
86  *
87  * Notes:
88  *	Don't even pay lip service to multiprocessor support.
89  *
90  *	We assume TLB entries don't have process tags (except for the
91  *	supervisor/user distinction) so we only invalidate TLB entries
92  *	when changing mappings for the current (or kernel) pmap.  This is
93  *	technically not true for the 68851 but we flush the TLB on every
94  *	context switch, so it effectively winds up that way.
95  *
96  *	Bitwise and/or operations are significantly faster than bitfield
97  *	references so we use them when accessing STE/PTEs in the pmap_pte_*
98  *	macros.  Note also that the two are not always equivalent; e.g.:
99  *		(*pte & PG_PROT) [4] != pte->pg_prot [1]
100  *	and a couple of routines that deal with protection and wiring take
101  *	some shortcuts that assume the and/or definitions.
102  *
103  *	This implementation will only work for PAGE_SIZE == NBPG
104  *	(i.e. 4096 bytes).
105  */
106 
107 /*
108  *	Manages physical address maps.
109  *
110  *	In addition to hardware address maps, this
111  *	module is called upon to provide software-use-only
112  *	maps which may or may not be stored in the same
113  *	form as hardware maps.  These pseudo-maps are
114  *	used to store intermediate results from copy
115  *	operations to and from address spaces.
116  *
117  *	Since the information managed by this module is
118  *	also stored by the logical address mapping module,
119  *	this module may throw away valid virtual-to-physical
120  *	mappings at almost any time.  However, invalidations
121  *	of virtual-to-physical mappings must be done as
122  *	requested.
123  *
124  *	In order to cope with hardware architectures which
125  *	make virtual-to-physical map invalidates expensive,
126  *	this module may delay invalidate or reduced protection
127  *	operations until such time as they are actually
128  *	necessary.  This module is given full information as
129  *	to which processors are currently using which maps,
130  *	and to when physical maps must be made correct.
131  */
132 
133 #include <sys/cdefs.h>
134 __KERNEL_RCSID(0, "$NetBSD: pmap_motorola.c,v 1.1 2002/11/05 07:41:25 chs Exp $");
135 
136 #include "opt_compat_hpux.h"
137 
138 #include <sys/param.h>
139 #include <sys/systm.h>
140 #include <sys/proc.h>
141 #include <sys/malloc.h>
142 #include <sys/user.h>
143 #include <sys/pool.h>
144 
145 #include <machine/pte.h>
146 
147 #include <uvm/uvm.h>
148 
149 #include <machine/cpu.h>
150 #include <m68k/cacheops.h>
151 
152 #ifdef DEBUG
153 #define PDB_FOLLOW	0x0001
154 #define PDB_INIT	0x0002
155 #define PDB_ENTER	0x0004
156 #define PDB_REMOVE	0x0008
157 #define PDB_CREATE	0x0010
158 #define PDB_PTPAGE	0x0020
159 #define PDB_CACHE	0x0040
160 #define PDB_BITS	0x0080
161 #define PDB_COLLECT	0x0100
162 #define PDB_PROTECT	0x0200
163 #define PDB_SEGTAB	0x0400
164 #define PDB_MULTIMAP	0x0800
165 #define PDB_PARANOIA	0x2000
166 #define PDB_WIRING	0x4000
167 #define PDB_PVDUMP	0x8000
168 
169 int debugmap = 0;
170 int pmapdebug = PDB_PARANOIA;
171 
172 #define	PMAP_DPRINTF(l, x)	if (pmapdebug & (l)) printf x
173 #else /* ! DEBUG */
174 #define	PMAP_DPRINTF(l, x)	/* nothing */
175 #endif /* DEBUG */
176 
177 /*
178  * Get STEs and PTEs for user/kernel address space
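 *
 * On the 68040/060 the segment table is two levels deep: pmap_ste1()
 * indexes the level-1 table and pmap_ste2() the level-2 block it points
 * to.  pmap_ste() and pmap_ste_v() hide this from the single-level
 * table used by the 68020/030.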
179  */
180 #if defined(M68040) || defined(M68060)
181 #define	pmap_ste1(m, v)	\
182 	(&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
183 /* XXX assumes physically contiguous ST pages (if more than one) */
184 #define pmap_ste2(m, v) \
185 	(&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m, v) & SG4_ADDR1) \
186 			- (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)]))
187 #if defined(M68020) || defined(M68030)
188 #define	pmap_ste(m, v)	\
189 	(&((m)->pm_stab[(vaddr_t)(v) \
190 			>> (mmutype == MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)]))
191 #define pmap_ste_v(m, v) \
192 	(mmutype == MMU_68040 \
193 	 ? ((*pmap_ste1(m, v) & SG_V) && \
194 	    (*pmap_ste2(m, v) & SG_V)) \
195 	 : (*pmap_ste(m, v) & SG_V))
196 #else
197 #define	pmap_ste(m, v)	\
198 	(&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
199 #define pmap_ste_v(m, v) \
200 	((*pmap_ste1(m, v) & SG_V) && (*pmap_ste2(m, v) & SG_V))
201 #endif
202 #else
203 #define	pmap_ste(m, v)	 (&((m)->pm_stab[(vaddr_t)(v) >> SG_ISHIFT]))
204 #define pmap_ste_v(m, v) (*pmap_ste(m, v) & SG_V)
205 #endif
206 
207 #define pmap_pte(m, v)	(&((m)->pm_ptab[(vaddr_t)(v) >> PG_SHIFT]))
208 #define pmap_pte_pa(pte)	(*(pte) & PG_FRAME)
209 #define pmap_pte_w(pte)		(*(pte) & PG_W)
210 #define pmap_pte_ci(pte)	(*(pte) & PG_CI)
211 #define pmap_pte_m(pte)		(*(pte) & PG_M)
212 #define pmap_pte_u(pte)		(*(pte) & PG_U)
213 #define pmap_pte_prot(pte)	(*(pte) & PG_PROT)
214 #define pmap_pte_v(pte)		(*(pte) & PG_V)
215 
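/*
 * Modify or test PTE bits.  The *_chg() macros are non-zero when the
 * new wired/protection value differs from what the PTE currently holds.
 */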
216 #define pmap_pte_set_w(pte, v) \
217 	if (v) *(pte) |= PG_W; else *(pte) &= ~PG_W
218 #define pmap_pte_set_prot(pte, v) \
219 	if (v) *(pte) |= PG_PROT; else *(pte) &= ~PG_PROT
220 #define pmap_pte_w_chg(pte, nw)		((nw) ^ pmap_pte_w(pte))
221 #define pmap_pte_prot_chg(pte, np)	((np) ^ pmap_pte_prot(pte))
222 
223 /*
224  * Given a map and a machine independent protection code,
225  * convert to an m68k protection code.
226  */
227 #define pte_prot(m, p)	(protection_codes[p])
228 int	protection_codes[8];
229 
230 /*
231  * Kernel page table page management.
232  */
233 struct kpt_page {
234 	struct kpt_page *kpt_next;	/* link on either used or free list */
235 	vaddr_t		kpt_va;		/* always valid kernel VA */
236 	paddr_t		kpt_pa;		/* PA of this page (for speed) */
237 };
238 struct kpt_page *kpt_free_list, *kpt_used_list;
239 struct kpt_page *kpt_pages;
240 
241 /*
242  * Kernel segment/page table and page table map.
 243  * The page table map gives us the level of indirection needed to dynamically
244  * expand the page table.  It is essentially a copy of the segment table
245  * with PTEs instead of STEs.  All are initialized in locore at boot time.
246  * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs.
247  * Segtabzero is an empty segment table which all processes share til they
248  * reference something.
249  */
250 st_entry_t	*Sysseg;
251 pt_entry_t	*Sysmap, *Sysptmap;
252 st_entry_t	*Segtabzero, *Segtabzeropa;
253 vsize_t		Sysptsize = VM_KERNEL_PT_PAGES;
254 
255 struct pmap	kernel_pmap_store;
256 struct vm_map	*st_map, *pt_map;
257 struct vm_map	st_map_store, pt_map_store;
258 
259 paddr_t		avail_start;	/* PA of first available physical page */
260 paddr_t		avail_end;	/* PA of last available physical page */
261 vsize_t		mem_size;	/* memory size in bytes */
262 vaddr_t		virtual_avail;  /* VA of first avail page (after kernel bss)*/
263 vaddr_t		virtual_end;	/* VA of last avail page (end of kernel AS) */
264 int		page_cnt;	/* number of pages managed by VM system */
265 
266 boolean_t	pmap_initialized = FALSE;	/* Has pmap_init completed? */
267 struct pv_entry	*pv_table;
268 char		*pmap_attributes;	/* reference and modify bits */
269 TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
270 int		pv_nfree;
271 
272 #ifdef M68K_MMU_HP
 273 int		pmap_aliasmask;	/* separation at which VA aliasing ok */
274 #endif
275 #if defined(M68040) || defined(M68060)
276 int		protostfree;	/* prototype (default) free ST map */
277 #endif
278 
279 extern caddr_t	CADDR1, CADDR2;
280 
281 pt_entry_t	*caddr1_pte;	/* PTE for CADDR1 */
282 pt_entry_t	*caddr2_pte;	/* PTE for CADDR2 */
283 
284 struct pool	pmap_pmap_pool;	/* memory pool for pmap structures */
285 
286 struct pv_entry *pmap_alloc_pv __P((void));
287 void	pmap_free_pv __P((struct pv_entry *));
288 void	pmap_collect_pv __P((void));
289 #ifdef COMPAT_HPUX
290 int	pmap_mapmulti __P((pmap_t, vaddr_t));
291 #endif /* COMPAT_HPUX */
292 
293 #define	PAGE_IS_MANAGED(pa)	(pmap_initialized &&			\
294 				 vm_physseg_find(atop((pa)), NULL) != -1)
295 
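/*
 * Find the pv list head (pa_to_pvh) or the attribute byte
 * (pa_to_attribute) for a managed physical address by looking up its
 * bank and offset in the vm_physmem[] array.
 */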
296 #define	pa_to_pvh(pa)							\
297 ({									\
298 	int bank_, pg_;							\
299 									\
300 	bank_ = vm_physseg_find(atop((pa)), &pg_);			\
301 	&vm_physmem[bank_].pmseg.pvent[pg_];				\
302 })
303 
304 #define	pa_to_attribute(pa)						\
305 ({									\
306 	int bank_, pg_;							\
307 									\
308 	bank_ = vm_physseg_find(atop((pa)), &pg_);			\
309 	&vm_physmem[bank_].pmseg.attrs[pg_];				\
310 })
311 
312 /*
313  * Internal routines
314  */
315 void	pmap_remove_mapping __P((pmap_t, vaddr_t, pt_entry_t *, int));
316 void	pmap_do_remove __P((pmap_t, vaddr_t, vaddr_t, int));
317 boolean_t pmap_testbit __P((paddr_t, int));
318 boolean_t pmap_changebit __P((paddr_t, int, int));
319 void	pmap_enter_ptpage	__P((pmap_t, vaddr_t));
320 void	pmap_ptpage_addref __P((vaddr_t));
321 int	pmap_ptpage_delref __P((vaddr_t));
322 void	pmap_collect1	__P((pmap_t, paddr_t, paddr_t));
323 void	pmap_pinit __P((pmap_t));
324 void	pmap_release __P((pmap_t));
325 
326 #ifdef DEBUG
327 void pmap_pvdump	__P((paddr_t));
328 void pmap_check_wiring	__P((char *, vaddr_t));
329 #endif
330 
331 /* pmap_remove_mapping flags */
332 #define	PRM_TFLUSH	0x01
333 #define	PRM_CFLUSH	0x02
334 #define	PRM_KEEPPTPAGE	0x04
335 
336 /*
337  * pmap_virtual_space:		[ INTERFACE ]
338  *
339  *	Report the range of available kernel virtual address
340  *	space to the VM system during bootstrap.
341  *
342  *	This is only an interface function if we do not use
343  *	pmap_steal_memory()!
344  *
345  *	Note: no locking is necessary in this function.
346  */
347 void
348 pmap_virtual_space(vstartp, vendp)
349 	vaddr_t	*vstartp, *vendp;
350 {
351 
352 	*vstartp = virtual_avail;
353 	*vendp = virtual_end;
354 }
355 
356 /*
357  * pmap_init:			[ INTERFACE ]
358  *
359  *	Initialize the pmap module.  Called by vm_init(), to initialize any
360  *	structures that the pmap system needs to map virtual memory.
361  *
362  *	Note: no locking is necessary in this function.
363  */
364 void
365 pmap_init()
366 {
367 	vaddr_t		addr, addr2;
368 	vsize_t		s;
369 	struct pv_entry	*pv;
370 	char		*attr;
371 	int		rv;
372 	int		npages;
373 	int		bank;
374 
375 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_init()\n"));
376 
377 	/*
378 	 * Before we do anything else, initialize the PTE pointers
379 	 * used by pmap_zero_page() and pmap_copy_page().
380 	 */
381 	caddr1_pte = pmap_pte(pmap_kernel(), CADDR1);
382 	caddr2_pte = pmap_pte(pmap_kernel(), CADDR2);
383 
384 	/*
385 	 * Now that kernel map has been allocated, we can mark as
386 	 * unavailable regions which we have mapped in pmap_bootstrap().
387 	 */
388 
389 	pmap_init_md();
390 	addr = (vaddr_t) Sysmap;
391 	if (uvm_map(kernel_map, &addr, M68K_MAX_PTSIZE,
392 		    NULL, UVM_UNKNOWN_OFFSET, 0,
393 		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
394 				UVM_INH_NONE, UVM_ADV_RANDOM,
395 				UVM_FLAG_FIXED)) != 0) {
396 		/*
397 		 * If this fails, it is probably because the static
398 		 * portion of the kernel page table isn't big enough
399 		 * and we overran the page table map.
400 		 */
401 
402 		panic("pmap_init: bogons in the VM system!");
403 	}
404 
405 	PMAP_DPRINTF(PDB_INIT,
406 	    ("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n",
407 	    Sysseg, Sysmap, Sysptmap));
408 	PMAP_DPRINTF(PDB_INIT,
409 	    ("  pstart %lx, pend %lx, vstart %lx, vend %lx\n",
410 	    avail_start, avail_end, virtual_avail, virtual_end));
411 
412 	/*
413 	 * Allocate memory for random pmap data structures.  Includes the
414 	 * initial segment table, pv_head_table and pmap_attributes.
415 	 */
416 	for (page_cnt = 0, bank = 0; bank < vm_nphysseg; bank++)
417 		page_cnt += vm_physmem[bank].end - vm_physmem[bank].start;
418 	s = M68K_STSIZE;					/* Segtabzero */
419 	s += page_cnt * sizeof(struct pv_entry);	/* pv table */
420 	s += page_cnt * sizeof(char);			/* attribute table */
421 	s = round_page(s);
422 	addr = uvm_km_zalloc(kernel_map, s);
423 	if (addr == 0)
424 		panic("pmap_init: can't allocate data structures");
425 
426 	Segtabzero = (st_entry_t *) addr;
427 	(void) pmap_extract(pmap_kernel(), addr, (paddr_t *)&Segtabzeropa);
428 	addr += M68K_STSIZE;
429 
430 	pv_table = (struct pv_entry *) addr;
431 	addr += page_cnt * sizeof(struct pv_entry);
432 
433 	pmap_attributes = (char *) addr;
434 
435 	PMAP_DPRINTF(PDB_INIT, ("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) "
436 	    "tbl %p atr %p\n",
437 	    s, page_cnt, Segtabzero, Segtabzeropa,
438 	    pv_table, pmap_attributes));
439 
440 	/*
441 	 * Now that the pv and attribute tables have been allocated,
442 	 * assign them to the memory segments.
443 	 */
444 	pv = pv_table;
445 	attr = pmap_attributes;
446 	for (bank = 0; bank < vm_nphysseg; bank++) {
447 		npages = vm_physmem[bank].end - vm_physmem[bank].start;
448 		vm_physmem[bank].pmseg.pvent = pv;
449 		vm_physmem[bank].pmseg.attrs = attr;
450 		pv += npages;
451 		attr += npages;
452 	}
453 
454 	/*
455 	 * Allocate physical memory for kernel PT pages and their management.
456 	 * We need 1 PT page per possible task plus some slop.
457 	 */
458 	npages = min(atop(M68K_MAX_KPTSIZE), maxproc+16);
459 	s = ptoa(npages) + round_page(npages * sizeof(struct kpt_page));
460 
461 	/*
 462 	 * Verify that space will be allocated in the region for which
463 	 * we already have kernel PT pages.
464 	 */
465 	addr = 0;
466 	rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, 0,
467 		     UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
468 				 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
469 	if (rv != 0 || (addr + s) >= (vaddr_t)Sysmap)
470 		panic("pmap_init: kernel PT too small");
471 	uvm_unmap(kernel_map, addr, addr + s);
472 
473 	/*
474 	 * Now allocate the space and link the pages together to
475 	 * form the KPT free list.
476 	 */
477 	addr = uvm_km_zalloc(kernel_map, s);
478 	if (addr == 0)
479 		panic("pmap_init: cannot allocate KPT free list");
480 	s = ptoa(npages);
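	/*
	 * The first ptoa(npages) bytes of the allocation hold the KPT
	 * pages themselves; their kpt_page descriptors follow.  Walk
	 * backwards over the pages, linking each onto the free list.
	 */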
481 	addr2 = addr + s;
482 	kpt_pages = &((struct kpt_page *)addr2)[npages];
483 	kpt_free_list = NULL;
484 	do {
485 		addr2 -= NBPG;
486 		(--kpt_pages)->kpt_next = kpt_free_list;
487 		kpt_free_list = kpt_pages;
488 		kpt_pages->kpt_va = addr2;
489 		(void) pmap_extract(pmap_kernel(), addr2,
490 		    (paddr_t *)&kpt_pages->kpt_pa);
491 	} while (addr != addr2);
492 
493 	PMAP_DPRINTF(PDB_INIT, ("pmap_init: KPT: %ld pages from %lx to %lx\n",
494 	    atop(s), addr, addr + s));
495 
496 	/*
497 	 * Allocate the segment table map and the page table map.
498 	 */
499 	s = maxproc * M68K_STSIZE;
500 	st_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0, FALSE,
501 	    &st_map_store);
502 
503 	addr = M68K_PTBASE;
504 	if ((M68K_PTMAXSIZE / M68K_MAX_PTSIZE) < maxproc) {
505 		s = M68K_PTMAXSIZE;
506 		/*
507 		 * XXX We don't want to hang when we run out of
508 		 * page tables, so we lower maxproc so that fork()
509 		 * will fail instead.  Note that root could still raise
510 		 * this value via sysctl(3).
511 		 */
512 		maxproc = (M68K_PTMAXSIZE / M68K_MAX_PTSIZE);
513 	} else
514 		s = (maxproc * M68K_MAX_PTSIZE);
515 	pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0,
516 	    TRUE, &pt_map_store);
517 
518 #if defined(M68040) || defined(M68060)
519 	if (mmutype == MMU_68040) {
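		/*
		 * Build the prototype free map for 68040 segment tables:
		 * level-2 block 0 and any blocks past MAXUL2SIZE are
		 * marked in use, the remaining blocks start out free.
		 */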
520 		protostfree = ~l2tobm(0);
521 		for (rv = MAXUL2SIZE; rv < sizeof(protostfree)*NBBY; rv++)
522 			protostfree &= ~l2tobm(rv);
523 	}
524 #endif
525 
526 	/*
527 	 * Initialize the pmap pools.
528 	 */
529 	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
530 	    &pool_allocator_nointr);
531 
532 	/*
533 	 * Now that this is done, mark the pages shared with the
534 	 * hardware page table search as non-CCB (actually, as CI).
535 	 *
536 	 * XXX Hm. Given that this is in the kernel map, can't we just
537 	 * use the va's?
538 	 */
539 #ifdef M68060
540 #if defined(M68020) || defined(M68030) || defined(M68040)
541 	if (cputype == CPU_68060)
542 #endif
543 	{
544 		struct kpt_page *kptp = kpt_free_list;
545 		paddr_t paddr;
546 
547 		while (kptp) {
548 			pmap_changebit(kptp->kpt_pa, PG_CI, ~PG_CCB);
549 			kptp = kptp->kpt_next;
550 		}
551 
552 		paddr = (paddr_t)Segtabzeropa;
553 		while (paddr < (paddr_t)Segtabzeropa + M68K_STSIZE) {
554 			pmap_changebit(paddr, PG_CI, ~PG_CCB);
555 			paddr += NBPG;
556 		}
557 
558 		DCIS();
559 	}
560 #endif
561 
562 	/*
563 	 * Now it is safe to enable pv_table recording.
564 	 */
565 	pmap_initialized = TRUE;
566 }
567 
568 /*
569  * pmap_alloc_pv:
570  *
571  *	Allocate a pv_entry.
572  */
573 struct pv_entry *
574 pmap_alloc_pv()
575 {
576 	struct pv_page *pvp;
577 	struct pv_entry *pv;
578 	int i;
579 
580 	if (pv_nfree == 0) {
581 		pvp = (struct pv_page *)uvm_km_zalloc(kernel_map, NBPG);
582 		if (pvp == 0)
583 			panic("pmap_alloc_pv: uvm_km_zalloc() failed");
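		/*
		 * Carve up the new page: hand out pvp_pv[0] and chain
		 * the remaining NPVPPG - 1 entries onto the page's
		 * private free list.
		 */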
584 		pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
585 		for (i = NPVPPG - 2; i; i--, pv++)
586 			pv->pv_next = pv + 1;
587 		pv->pv_next = 0;
588 		pv_nfree += pvp->pvp_pgi.pgi_nfree = NPVPPG - 1;
589 		TAILQ_INSERT_HEAD(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
590 		pv = &pvp->pvp_pv[0];
591 	} else {
592 		--pv_nfree;
593 		pvp = pv_page_freelist.tqh_first;
594 		if (--pvp->pvp_pgi.pgi_nfree == 0) {
595 			TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
596 		}
597 		pv = pvp->pvp_pgi.pgi_freelist;
598 #ifdef DIAGNOSTIC
599 		if (pv == 0)
600 			panic("pmap_alloc_pv: pgi_nfree inconsistent");
601 #endif
602 		pvp->pvp_pgi.pgi_freelist = pv->pv_next;
603 	}
604 	return pv;
605 }
606 
607 /*
608  * pmap_free_pv:
609  *
610  *	Free a pv_entry.
611  */
612 void
613 pmap_free_pv(pv)
614 	struct pv_entry *pv;
615 {
616 	struct pv_page *pvp;
617 
618 	pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
619 	switch (++pvp->pvp_pgi.pgi_nfree) {
620 	case 1:
621 		TAILQ_INSERT_TAIL(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
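		/* Page was fully allocated; it is now back on the free list. */
		/* FALLTHROUGH */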
622 	default:
623 		pv->pv_next = pvp->pvp_pgi.pgi_freelist;
624 		pvp->pvp_pgi.pgi_freelist = pv;
625 		++pv_nfree;
626 		break;
627 	case NPVPPG:
628 		pv_nfree -= NPVPPG - 1;
629 		TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
630 		uvm_km_free(kernel_map, (vaddr_t)pvp, NBPG);
631 		break;
632 	}
633 }
634 
635 /*
636  * pmap_collect_pv:
637  *
638  *	Perform compaction on the PV list, called via pmap_collect().
639  */
640 void
641 pmap_collect_pv()
642 {
643 	struct pv_page_list pv_page_collectlist;
644 	struct pv_page *pvp, *npvp;
645 	struct pv_entry *ph, *ppv, *pv, *npv;
646 	int s;
647 
648 	TAILQ_INIT(&pv_page_collectlist);
649 
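	/*
	 * Pass 1: pull pv pages with more than a third of their entries
	 * free off the free list and mark them (pgi_nfree == -1) for
	 * collection.
	 */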
650 	for (pvp = pv_page_freelist.tqh_first; pvp; pvp = npvp) {
651 		if (pv_nfree < NPVPPG)
652 			break;
653 		npvp = pvp->pvp_pgi.pgi_list.tqe_next;
654 		if (pvp->pvp_pgi.pgi_nfree > NPVPPG / 3) {
655 			TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
656 			TAILQ_INSERT_TAIL(&pv_page_collectlist, pvp,
657 			    pvp_pgi.pgi_list);
658 			pv_nfree -= NPVPPG;
659 			pvp->pvp_pgi.pgi_nfree = -1;
660 		}
661 	}
662 
663 	if (pv_page_collectlist.tqh_first == 0)
664 		return;
665 
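	/*
	 * Pass 2: walk every pv list and migrate entries that live on a
	 * marked page onto a page that is staying, leaving the marked
	 * pages empty.
	 */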
666 	for (ph = &pv_table[page_cnt - 1]; ph >= &pv_table[0]; ph--) {
667 		if (ph->pv_pmap == 0)
668 			continue;
669 		s = splvm();
670 		for (ppv = ph; (pv = ppv->pv_next) != 0; ) {
671 			pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
672 			if (pvp->pvp_pgi.pgi_nfree == -1) {
673 				pvp = pv_page_freelist.tqh_first;
674 				if (--pvp->pvp_pgi.pgi_nfree == 0) {
675 					TAILQ_REMOVE(&pv_page_freelist, pvp,
676 					    pvp_pgi.pgi_list);
677 				}
678 				npv = pvp->pvp_pgi.pgi_freelist;
679 #ifdef DIAGNOSTIC
680 				if (npv == 0)
681 					panic("pmap_collect_pv: pgi_nfree inconsistent");
682 #endif
683 				pvp->pvp_pgi.pgi_freelist = npv->pv_next;
684 				*npv = *pv;
685 				ppv->pv_next = npv;
686 				ppv = npv;
687 			} else
688 				ppv = pv;
689 		}
690 		splx(s);
691 	}
692 
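	/* Pass 3: return the now-empty pages to the kernel. */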
693 	for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) {
694 		npvp = pvp->pvp_pgi.pgi_list.tqe_next;
695 		uvm_km_free(kernel_map, (vaddr_t)pvp, NBPG);
696 	}
697 }
698 
699 /*
700  * pmap_map:
701  *
702  *	Used to map a range of physical addresses into kernel
703  *	virtual address space.
704  *
705  *	For now, VM is already on, we only need to map the
706  *	specified memory.
707  *
708  *	Note: THIS FUNCTION IS DEPRECATED, AND SHOULD BE REMOVED!
709  */
710 vaddr_t
711 pmap_map(va, spa, epa, prot)
712 	vaddr_t va;
713 	paddr_t spa, epa;
714 	int prot;
715 {
716 
717 	PMAP_DPRINTF(PDB_FOLLOW,
718 	    ("pmap_map(%lx, %lx, %lx, %x)\n", va, spa, epa, prot));
719 
720 	while (spa < epa) {
721 		pmap_enter(pmap_kernel(), va, spa, prot, 0);
722 		va += NBPG;
723 		spa += NBPG;
724 	}
725 	pmap_update(pmap_kernel());
726 	return (va);
727 }
728 
729 /*
730  * pmap_create:			[ INTERFACE ]
731  *
732  *	Create and return a physical map.
733  *
734  *	Note: no locking is necessary in this function.
735  */
736 pmap_t
737 pmap_create()
738 {
739 	struct pmap *pmap;
740 
741 	PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
742 	    ("pmap_create()\n"));
743 
744 	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
745 	memset(pmap, 0, sizeof(*pmap));
746 	pmap_pinit(pmap);
747 	return (pmap);
748 }
749 
750 /*
751  * pmap_pinit:
752  *
753  *	Initialize a preallocated and zeroed pmap structure.
754  *
755  *	Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_create()!
756  */
757 void
758 pmap_pinit(pmap)
759 	struct pmap *pmap;
760 {
761 
762 	PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
763 	    ("pmap_pinit(%p)\n", pmap));
764 
765 	/*
766 	 * No need to allocate page table space yet but we do need a
767 	 * valid segment table.  Initially, we point everyone at the
768 	 * "null" segment table.  On the first pmap_enter, a real
769 	 * segment table will be allocated.
770 	 */
771 	pmap->pm_stab = Segtabzero;
772 	pmap->pm_stpa = Segtabzeropa;
773 #if defined(M68040) || defined(M68060)
774 #if defined(M68020) || defined(M68030)
775 	if (mmutype == MMU_68040)
776 #endif
777 		pmap->pm_stfree = protostfree;
778 #endif
779 	pmap->pm_count = 1;
780 	simple_lock_init(&pmap->pm_lock);
781 }
782 
783 /*
784  * pmap_destroy:		[ INTERFACE ]
785  *
786  *	Drop the reference count on the specified pmap, releasing
787  *	all resources if the reference count drops to zero.
788  */
789 void
790 pmap_destroy(pmap)
791 	struct pmap *pmap;
792 {
793 	int count;
794 
795 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_destroy(%p)\n", pmap));
796 
797 	simple_lock(&pmap->pm_lock);
798 	count = --pmap->pm_count;
799 	simple_unlock(&pmap->pm_lock);
800 	if (count == 0) {
801 		pmap_release(pmap);
802 		pool_put(&pmap_pmap_pool, pmap);
803 	}
804 }
805 
806 /*
807  * pmap_release:
808  *
 809  *	Release the resources held by a pmap.
810  *
811  *	Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_destroy().
812  */
813 void
814 pmap_release(pmap)
815 	struct pmap *pmap;
816 {
817 
818 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_release(%p)\n", pmap));
819 
820 #ifdef notdef /* DIAGNOSTIC */
821 	/* count would be 0 from pmap_destroy... */
822 	simple_lock(&pmap->pm_lock);
823 	if (pmap->pm_count != 1)
824 		panic("pmap_release count");
825 #endif
826 
827 	if (pmap->pm_ptab) {
828 		pmap_remove(pmap_kernel(), (vaddr_t)pmap->pm_ptab,
829 		    (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE);
830 		uvm_km_pgremove(uvm.kernel_object,
831 		    (vaddr_t)pmap->pm_ptab - vm_map_min(kernel_map),
832 		    (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE
833 				- vm_map_min(kernel_map));
834 		uvm_km_free_wakeup(pt_map, (vaddr_t)pmap->pm_ptab,
835 				   M68K_MAX_PTSIZE);
836 	}
837 	KASSERT(pmap->pm_stab == Segtabzero);
838 }
839 
840 /*
841  * pmap_reference:		[ INTERFACE ]
842  *
843  *	Add a reference to the specified pmap.
844  */
845 void
846 pmap_reference(pmap)
847 	pmap_t	pmap;
848 {
849 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_reference(%p)\n", pmap));
850 
851 	simple_lock(&pmap->pm_lock);
852 	pmap->pm_count++;
853 	simple_unlock(&pmap->pm_lock);
854 }
855 
856 /*
857  * pmap_activate:		[ INTERFACE ]
858  *
859  *	Activate the pmap used by the specified process.  This includes
 860  *	reloading the MMU context if it is the current process, and marking
861  *	the pmap in use by the processor.
862  *
863  *	Note: we may only use spin locks here, since we are called
864  *	by a critical section in cpu_switch()!
865  */
866 void
867 pmap_activate(p)
868 	struct proc *p;
869 {
870 	struct pmap *pmap = p->p_vmspace->vm_map.pmap;
871 
872 	PMAP_DPRINTF(PDB_FOLLOW|PDB_SEGTAB,
873 	    ("pmap_activate(%p)\n", p));
874 
875 	PMAP_ACTIVATE(pmap, p == curproc);
876 }
877 
878 /*
879  * pmap_deactivate:		[ INTERFACE ]
880  *
881  *	Mark that the pmap used by the specified process is no longer
882  *	in use by the processor.
883  *
884  *	The comment above pmap_activate() wrt. locking applies here,
885  *	as well.
886  */
887 void
888 pmap_deactivate(p)
889 	struct proc *p;
890 {
891 
892 	/* No action necessary in this pmap implementation. */
893 }
894 
895 /*
896  * pmap_remove:			[ INTERFACE ]
897  *
898  *	Remove the given range of addresses from the specified map.
899  *
900  *	It is assumed that the start and end are properly
901  *	rounded to the page size.
902  */
903 void
904 pmap_remove(pmap, sva, eva)
905 	struct pmap *pmap;
906 	vaddr_t sva, eva;
907 {
908 
909 	pmap_do_remove(pmap, sva, eva, 1);
910 }
911 
912 void
913 pmap_do_remove(pmap, sva, eva, remove_wired)
914 	struct pmap *pmap;
915 	vaddr_t sva, eva;
916 	int remove_wired;
917 {
918 	vaddr_t nssva;
919 	pt_entry_t *pte;
920 	int flags;
921 #ifdef M68K_MMU_HP
922 	boolean_t firstpage, needcflush;
923 #endif
924 
925 	PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
926 	    ("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva));
927 
#ifdef M68K_MMU_HP
	firstpage = TRUE;
	needcflush = FALSE;
#endif
 928 	flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
929 	while (sva < eva) {
930 		nssva = m68k_trunc_seg(sva) + NBSEG;
931 		if (nssva == 0 || nssva > eva)
932 			nssva = eva;
933 
934 		/*
935 		 * Invalidate every valid mapping within this segment.
936 		 * If remove_wired is zero, skip the wired pages.
937 		 */
938 
939 		pte = pmap_pte(pmap, sva);
940 		while (sva < nssva) {
941 
942 			/*
943 			 * If this segment is unallocated,
944 			 * skip to the next segment boundary.
945 			 */
946 
947 			if (!pmap_ste_v(pmap, sva)) {
948 				sva = nssva;
949 				break;
950 			}
951 
952 
953 
954 			if (pmap_pte_v(pte) &&
955 			    (remove_wired || !pmap_pte_w(pte))) {
956 #ifdef M68K_MMU_HP
957 				if (pmap_aliasmask) {
958 
959 					/*
960 					 * Purge kernel side of VAC to ensure
961 					 * we get the correct state of any
962 					 * hardware maintained bits.
963 					 */
964 
 965 					if (firstpage) {
 966 						DCIS();
						firstpage = FALSE;
 967 					}
968 
969 					/*
970 					 * Remember if we may need to
971 					 * flush the VAC due to a non-CI
972 					 * mapping.
973 					 */
974 
975 					if (!needcflush && !pmap_pte_ci(pte))
976 						needcflush = TRUE;
977 
978 				}
979 #endif
980 				pmap_remove_mapping(pmap, sva, pte, flags);
981 			}
982 			pte++;
983 			sva += NBPG;
984 		}
985 	}
986 
987 #ifdef M68K_MMU_HP
988 
989 	/*
990 	 * Didn't do anything, no need for cache flushes
991 	 */
992 
993 	if (firstpage)
994 		return;
995 
996 	/*
997 	 * In a couple of cases, we don't need to worry about flushing
998 	 * the VAC:
999 	 * 	1. if this is a kernel mapping,
1000 	 *	   we have already done it
1001 	 *	2. if it is a user mapping not for the current process,
1002 	 *	   it won't be there
1003 	 */
1004 
1005 	if (pmap_aliasmask && !active_user_pmap(pmap))
1006 		needcflush = FALSE;
1007 	if (needcflush) {
1008 		if (pmap == pmap_kernel()) {
1009 			DCIS();
1010 		} else {
1011 			DCIU();
1012 		}
1013 	}
1014 #endif
1015 }
1016 
1017 /*
1018  * pmap_page_protect:		[ INTERFACE ]
1019  *
1020  *	Lower the permission for all mappings to a given page to
1021  *	the permissions specified.
1022  */
1023 void
1024 pmap_page_protect(pg, prot)
1025 	struct vm_page *pg;
1026 	vm_prot_t prot;
1027 {
1028 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
1029 	struct pv_entry *pv;
1030 	pt_entry_t *pte;
1031 	int s;
1032 
1033 #ifdef DEBUG
1034 	if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
1035 	    (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
1036 		printf("pmap_page_protect(%p, %x)\n", pg, prot);
1037 #endif
1038 
1039 	switch (prot) {
1040 	case VM_PROT_READ|VM_PROT_WRITE:
1041 	case VM_PROT_ALL:
1042 		return;
1043 
1044 	/* copy_on_write */
1045 	case VM_PROT_READ:
1046 	case VM_PROT_READ|VM_PROT_EXECUTE:
1047 		pmap_changebit(pa, PG_RO, ~0);
1048 		return;
1049 
1050 	/* remove_all */
1051 	default:
1052 		break;
1053 	}
1054 
1055 	pv = pa_to_pvh(pa);
1056 	s = splvm();
1057 	while (pv->pv_pmap != NULL) {
1058 
1059 		pte = pmap_pte(pv->pv_pmap, pv->pv_va);
1060 #ifdef DEBUG
1061 		if (!pmap_ste_v(pv->pv_pmap, pv->pv_va) ||
1062 		    pmap_pte_pa(pte) != pa)
1063 			panic("pmap_page_protect: bad mapping");
1064 #endif
1065 		pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
1066 		    pte, PRM_TFLUSH|PRM_CFLUSH);
1067 	}
1068 	splx(s);
1069 }
1070 
1071 /*
1072  * pmap_protect:		[ INTERFACE ]
1073  *
 1074  *	Set the physical protection on the specified range of this map
1075  *	as requested.
1076  */
1077 void
1078 pmap_protect(pmap, sva, eva, prot)
1079 	pmap_t		pmap;
1080 	vaddr_t		sva, eva;
1081 	vm_prot_t	prot;
1082 {
1083 	vaddr_t nssva;
1084 	pt_entry_t *pte;
1085 	boolean_t firstpage, needtflush;
1086 	int isro;
1087 
1088 	PMAP_DPRINTF(PDB_FOLLOW|PDB_PROTECT,
1089 	    ("pmap_protect(%p, %lx, %lx, %x)\n",
1090 	    pmap, sva, eva, prot));
1091 
1092 #ifdef PMAPSTATS
1093 	protect_stats.calls++;
1094 #endif
1095 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1096 		pmap_remove(pmap, sva, eva);
1097 		return;
1098 	}
1099 	isro = pte_prot(pmap, prot);
1100 	needtflush = active_pmap(pmap);
1101 	firstpage = TRUE;
1102 	while (sva < eva) {
1103 		nssva = m68k_trunc_seg(sva) + NBSEG;
1104 		if (nssva == 0 || nssva > eva)
1105 			nssva = eva;
1106 
1107 		/*
1108 		 * If VA belongs to an unallocated segment,
1109 		 * skip to the next segment boundary.
1110 		 */
1111 
1112 		if (!pmap_ste_v(pmap, sva)) {
1113 			sva = nssva;
1114 			continue;
1115 		}
1116 
1117 		/*
1118 		 * Change protection on mapping if it is valid and doesn't
1119 		 * already have the correct protection.
1120 		 */
1121 
1122 		pte = pmap_pte(pmap, sva);
1123 		while (sva < nssva) {
1124 			if (pmap_pte_v(pte) && pmap_pte_prot_chg(pte, isro)) {
1125 #ifdef M68K_MMU_HP
1126 
1127 				/*
1128 				 * Purge kernel side of VAC to ensure we
1129 				 * get the correct state of any hardware
1130 				 * maintained bits.
1131 				 *
1132 				 * XXX do we need to clear the VAC in
1133 				 * general to reflect the new protection?
1134 				 */
1135 
1136 				if (firstpage && pmap_aliasmask)
1137 					DCIS();
1138 #endif
1139 
1140 #if defined(M68040) || defined(M68060)
1141 
1142 				/*
1143 				 * Clear caches if making RO (see section
1144 				 * "7.3 Cache Coherency" in the manual).
1145 				 */
1146 
1147 #if defined(M68020) || defined(M68030)
1148 				if (isro && mmutype == MMU_68040)
1149 #else
1150 				if (isro)
1151 #endif
1152 				{
1153 					paddr_t pa = pmap_pte_pa(pte);
1154 
1155 					DCFP(pa);
1156 					ICPP(pa);
1157 				}
1158 #endif
1159 				pmap_pte_set_prot(pte, isro);
1160 				if (needtflush)
1161 					TBIS(sva);
1162 				firstpage = FALSE;
1163 			}
1164 			pte++;
1165 			sva += NBPG;
1166 		}
1167 	}
1168 }
1169 
1170 /*
1171  * pmap_enter:			[ INTERFACE ]
1172  *
1173  *	Insert the given physical page (pa) at
1174  *	the specified virtual address (va) in the
1175  *	target physical map with the protection requested.
1176  *
1177  *	If specified, the page will be wired down, meaning
1178  *	that the related pte cannot be reclaimed.
1179  *
1180  *	Note: This is the only routine which MAY NOT lazy-evaluate
 1181  *	or lose information.  That is, this routine must actually
1182  *	insert this page into the given map NOW.
1183  */
1184 int
1185 pmap_enter(pmap, va, pa, prot, flags)
1186 	struct pmap *pmap;
1187 	vaddr_t va;
1188 	paddr_t pa;
1189 	vm_prot_t prot;
1190 	int flags;
1191 {
1192 	pt_entry_t *pte;
1193 	int npte;
1194 	paddr_t opa;
1195 	boolean_t cacheable = TRUE;
1196 	boolean_t checkpv = TRUE;
1197 	boolean_t wired = (flags & PMAP_WIRED) != 0;
1198 
1199 	PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
1200 	    ("pmap_enter(%p, %lx, %lx, %x, %x)\n",
1201 	    pmap, va, pa, prot, wired));
1202 
1203 #ifdef DIAGNOSTIC
1204 	/*
1205 	 * pmap_enter() should never be used for CADDR1 and CADDR2.
1206 	 */
1207 	if (pmap == pmap_kernel() &&
1208 	    (va == (vaddr_t)CADDR1 || va == (vaddr_t)CADDR2))
1209 		panic("pmap_enter: used for CADDR1 or CADDR2");
1210 #endif
1211 
1212 	/*
1213 	 * For user mapping, allocate kernel VM resources if necessary.
1214 	 */
1215 	if (pmap->pm_ptab == NULL)
1216 		pmap->pm_ptab = (pt_entry_t *)
1217 			uvm_km_valloc_wait(pt_map, M68K_MAX_PTSIZE);
1218 
1219 	/*
1220 	 * Segment table entry not valid, we need a new PT page
1221 	 */
1222 	if (!pmap_ste_v(pmap, va))
1223 		pmap_enter_ptpage(pmap, va);
1224 
1225 	pa = m68k_trunc_page(pa);
1226 	pte = pmap_pte(pmap, va);
1227 	opa = pmap_pte_pa(pte);
1228 
1229 	PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
1230 
1231 	/*
1232 	 * Mapping has not changed, must be protection or wiring change.
1233 	 */
1234 	if (opa == pa) {
1235 		/*
1236 		 * Wiring change, just update stats.
1237 		 * We don't worry about wiring PT pages as they remain
1238 		 * resident as long as there are valid mappings in them.
1239 		 * Hence, if a user page is wired, the PT page will be also.
1240 		 */
1241 		if (pmap_pte_w_chg(pte, wired ? PG_W : 0)) {
1242 			PMAP_DPRINTF(PDB_ENTER,
1243 			    ("enter: wiring change -> %x\n", wired));
1244 			if (wired)
1245 				pmap->pm_stats.wired_count++;
1246 			else
1247 				pmap->pm_stats.wired_count--;
1248 		}
1249 		/*
1250 		 * Retain cache inhibition status
1251 		 */
1252 		checkpv = FALSE;
1253 		if (pmap_pte_ci(pte))
1254 			cacheable = FALSE;
1255 		goto validate;
1256 	}
1257 
1258 	/*
1259 	 * Mapping has changed, invalidate old range and fall through to
1260 	 * handle validating new mapping.
1261 	 */
1262 	if (opa) {
1263 		PMAP_DPRINTF(PDB_ENTER,
1264 		    ("enter: removing old mapping %lx\n", va));
1265 		pmap_remove_mapping(pmap, va, pte,
1266 		    PRM_TFLUSH|PRM_CFLUSH|PRM_KEEPPTPAGE);
1267 	}
1268 
1269 	/*
1270 	 * If this is a new user mapping, increment the wiring count
1271 	 * on this PT page.  PT pages are wired down as long as there
1272 	 * is a valid mapping in the page.
1273 	 */
1274 	if (pmap != pmap_kernel())
1275 		pmap_ptpage_addref(trunc_page((vaddr_t)pte));
1276 
1277 	/*
1278 	 * Enter on the PV list if part of our managed memory
1279 	 * Note that we raise IPL while manipulating pv_table
1280 	 * since pmap_enter can be called at interrupt time.
1281 	 */
1282 	if (PAGE_IS_MANAGED(pa)) {
1283 		struct pv_entry *pv, *npv;
1284 		int s;
1285 
1286 		pv = pa_to_pvh(pa);
1287 		s = splvm();
1288 
1289 		PMAP_DPRINTF(PDB_ENTER,
1290 		    ("enter: pv at %p: %lx/%p/%p\n",
1291 		    pv, pv->pv_va, pv->pv_pmap, pv->pv_next));
1292 		/*
1293 		 * No entries yet, use header as the first entry
1294 		 */
1295 		if (pv->pv_pmap == NULL) {
1296 			pv->pv_va = va;
1297 			pv->pv_pmap = pmap;
1298 			pv->pv_next = NULL;
1299 			pv->pv_ptste = NULL;
1300 			pv->pv_ptpmap = NULL;
1301 			pv->pv_flags = 0;
1302 		}
1303 		/*
1304 		 * There is at least one other VA mapping this page.
1305 		 * Place this entry after the header.
1306 		 */
1307 		else {
1308 #ifdef DEBUG
1309 			for (npv = pv; npv; npv = npv->pv_next)
1310 				if (pmap == npv->pv_pmap && va == npv->pv_va)
1311 					panic("pmap_enter: already in pv_tab");
1312 #endif
1313 			npv = pmap_alloc_pv();
1314 			npv->pv_va = va;
1315 			npv->pv_pmap = pmap;
1316 			npv->pv_next = pv->pv_next;
1317 			npv->pv_ptste = NULL;
1318 			npv->pv_ptpmap = NULL;
1319 			npv->pv_flags = 0;
1320 			pv->pv_next = npv;
1321 
1322 #ifdef M68K_MMU_HP
1323 
1324 			/*
1325 			 * Since there is another logical mapping for the
1326 			 * same page we may need to cache-inhibit the
1327 			 * descriptors on those CPUs with external VACs.
1328 			 * We don't need to CI if:
1329 			 *
1330 			 * - No two mappings belong to the same user pmaps.
1331 			 *   Since the cache is flushed on context switches
1332 			 *   there is no problem between user processes.
1333 			 *
1334 			 * - Mappings within a single pmap are a certain
1335 			 *   magic distance apart.  VAs at these appropriate
1336 			 *   boundaries map to the same cache entries or
1337 			 *   otherwise don't conflict.
1338 			 *
1339 			 * To keep it simple, we only check for these special
1340 			 * cases if there are only two mappings, otherwise we
1341 			 * punt and always CI.
1342 			 *
1343 			 * Note that there are no aliasing problems with the
1344 			 * on-chip data-cache when the WA bit is set.
1345 			 */
1346 
1347 			if (pmap_aliasmask) {
1348 				if (pv->pv_flags & PV_CI) {
1349 					PMAP_DPRINTF(PDB_CACHE,
1350 					    ("enter: pa %lx already CI'ed\n",
1351 					    pa));
1352 					checkpv = cacheable = FALSE;
1353 				} else if (npv->pv_next ||
1354 					   ((pmap == pv->pv_pmap ||
1355 					     pmap == pmap_kernel() ||
1356 					     pv->pv_pmap == pmap_kernel()) &&
1357 					    ((pv->pv_va & pmap_aliasmask) !=
1358 					     (va & pmap_aliasmask)))) {
1359 					PMAP_DPRINTF(PDB_CACHE,
1360 					    ("enter: pa %lx CI'ing all\n",
1361 					    pa));
1362 					cacheable = FALSE;
1363 					pv->pv_flags |= PV_CI;
1364 				}
1365 			}
1366 #endif
1367 		}
1368 
1369 		/*
1370 		 * Speed pmap_is_referenced() or pmap_is_modified() based
1371 		 * on the hint provided in access_type.
1372 		 */
1373 #ifdef DIAGNOSTIC
1374 		if ((flags & VM_PROT_ALL) & ~prot)
1375 			panic("pmap_enter: access_type exceeds prot");
1376 #endif
1377 		if (flags & VM_PROT_WRITE)
1378 			*pa_to_attribute(pa) |= (PG_U|PG_M);
1379 		else if (flags & VM_PROT_ALL)
1380 			*pa_to_attribute(pa) |= PG_U;
1381 
1382 		splx(s);
1383 	}
1384 	/*
1385 	 * Assumption: if it is not part of our managed memory
 1386  * then it must be device memory which may be volatile.
1387 	 */
1388 	else if (pmap_initialized) {
1389 		checkpv = cacheable = FALSE;
1390 	}
1391 
1392 	/*
1393 	 * Increment counters
1394 	 */
1395 	pmap->pm_stats.resident_count++;
1396 	if (wired)
1397 		pmap->pm_stats.wired_count++;
1398 
1399 validate:
1400 #ifdef M68K_MMU_HP
1401 	/*
1402 	 * Purge kernel side of VAC to ensure we get correct state
1403 	 * of HW bits so we don't clobber them.
1404 	 */
1405 	if (pmap_aliasmask)
1406 		DCIS();
1407 #endif
1408 
1409 	/*
1410 	 * Build the new PTE.
1411 	 */
1412 
1413 	npte = pa | pte_prot(pmap, prot) | (*pte & (PG_M|PG_U)) | PG_V;
1414 	if (wired)
1415 		npte |= PG_W;
1416 	if (!checkpv && !cacheable)
1417 #if defined(M68040) || defined(M68060)
1418 #if defined(M68020) || defined(M68030)
1419 		npte |= (mmutype == MMU_68040 ? PG_CIN : PG_CI);
1420 #else
1421 		npte |= PG_CIN;
1422 #endif
1423 #else
1424 		npte |= PG_CI;
1425 #endif
1426 #if defined(M68040) || defined(M68060)
1427 #if defined(M68020) || defined(M68030)
1428 	else if (mmutype == MMU_68040 && (npte & (PG_PROT|PG_CI)) == PG_RW)
1429 #else
1430 	else if ((npte & (PG_PROT|PG_CI)) == PG_RW)
1431 #endif
1432 		npte |= PG_CCB;
1433 #endif
1434 
1435 	PMAP_DPRINTF(PDB_ENTER, ("enter: new pte value %x\n", npte));
1436 
1437 	/*
1438 	 * Remember if this was a wiring-only change.
1439 	 * If so, we need not flush the TLB and caches.
1440 	 */
1441 
1442 	wired = ((*pte ^ npte) == PG_W);
1443 #if defined(M68040) || defined(M68060)
1444 #if defined(M68020) || defined(M68030)
1445 	if (mmutype == MMU_68040 && !wired)
1446 #else
1447 	if (!wired)
1448 #endif
1449 	{
1450 		DCFP(pa);
1451 		ICPP(pa);
1452 	}
1453 #endif
1454 	*pte = npte;
1455 	if (!wired && active_pmap(pmap))
1456 		TBIS(va);
1457 #ifdef M68K_MMU_HP
1458 	/*
1459 	 * The following is executed if we are entering a second
1460 	 * (or greater) mapping for a physical page and the mappings
1461 	 * may create an aliasing problem.  In this case we must
1462 	 * cache inhibit the descriptors involved and flush any
1463 	 * external VAC.
1464 	 */
1465 	if (checkpv && !cacheable) {
1466 		pmap_changebit(pa, PG_CI, ~0);
1467 		DCIA();
1468 #ifdef DEBUG
1469 		if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
1470 		    (PDB_CACHE|PDB_PVDUMP))
1471 			pmap_pvdump(pa);
1472 #endif
1473 	}
1474 #endif
1475 #ifdef DEBUG
1476 	if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel())
1477 		pmap_check_wiring("enter", trunc_page((vaddr_t)pte));
1478 #endif
1479 
1480 	return 0;
1481 }
1482 
1483 void
1484 pmap_kenter_pa(va, pa, prot)
1485 	vaddr_t va;
1486 	paddr_t pa;
1487 	vm_prot_t prot;
1488 {
1489 	struct pmap *pmap = pmap_kernel();
1490 	pt_entry_t *pte;
1491 	int s, npte;
1492 
1493 	PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
1494 	    ("pmap_kenter_pa(%lx, %lx, %x)\n", va, pa, prot));
1495 
1496 	/*
1497 	 * Segment table entry not valid, we need a new PT page
1498 	 */
1499 
1500 	if (!pmap_ste_v(pmap, va)) {
1501 		s = splvm();
1502 		pmap_enter_ptpage(pmap, va);
1503 		splx(s);
1504 	}
1505 
1506 	pa = m68k_trunc_page(pa);
1507 	pte = pmap_pte(pmap, va);
1508 
1509 	PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
1510 	KASSERT(!pmap_pte_v(pte));
1511 
1512 	/*
1513 	 * Increment counters
1514 	 */
1515 
1516 	pmap->pm_stats.resident_count++;
1517 	pmap->pm_stats.wired_count++;
1518 
1519 	/*
1520 	 * Build the new PTE.
1521 	 */
1522 
1523 	npte = pa | pte_prot(pmap, prot) | PG_V | PG_W;
1524 #if defined(M68040) || defined(M68060)
1525 #if defined(M68020) || defined(M68030)
1526 	if (mmutype == MMU_68040 && (npte & PG_PROT) == PG_RW)
1527 #else
1528 	if ((npte & PG_PROT) == PG_RW)
1529 #endif
1530 		npte |= PG_CCB;
1531 
1532 	if (mmutype == MMU_68040) {
1533 		DCFP(pa);
1534 		ICPP(pa);
1535 	}
1536 #endif
1537 
1538 	*pte = npte;
1539 	TBIS(va);
1540 }
1541 
1542 void
1543 pmap_kremove(va, size)
1544 	vaddr_t va;
1545 	vsize_t size;
1546 {
1547 	struct pmap *pmap = pmap_kernel();
1548 	pt_entry_t *pte;
1549 	vaddr_t nssva;
1550 	vaddr_t eva = va + size;
1551 #ifdef M68K_MMU_HP
1552 	boolean_t firstpage, needcflush;
1553 #endif
1554 
1555 	PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
1556 	    ("pmap_kremove(%lx, %lx)\n", va, size));
1557 
1558 #ifdef M68K_MMU_HP
1559 	firstpage = TRUE;
1560 	needcflush = FALSE;
1561 #endif
1562 	while (va < eva) {
1563 		nssva = m68k_trunc_seg(va) + NBSEG;
1564 		if (nssva == 0 || nssva > eva)
1565 			nssva = eva;
1566 
1567 		/*
1568 		 * If VA belongs to an unallocated segment,
1569 		 * skip to the next segment boundary.
1570 		 */
1571 
1572 		if (!pmap_ste_v(pmap, va)) {
1573 			va = nssva;
1574 			continue;
1575 		}
1576 
1577 		/*
1578 		 * Invalidate every valid mapping within this segment.
1579 		 */
1580 
1581 		pte = pmap_pte(pmap, va);
1582 		while (va < nssva) {
1583 			if (!pmap_pte_v(pte)) {
1584 				pte++;
1585 				va += NBPG;
1586 				continue;
1587 			}
1588 #ifdef M68K_MMU_HP
1589 			if (pmap_aliasmask) {
1590 
1591 				/*
1592 				 * Purge kernel side of VAC to ensure
1593 				 * we get the correct state of any
1594 				 * hardware maintained bits.
1595 				 */
1596 
1597 				if (firstpage) {
1598 					DCIS();
1599 					firstpage = FALSE;
1600 				}
1601 
1602 				/*
1603 				 * Remember if we may need to
1604 				 * flush the VAC.
1605 				 */
1606 
1607 				needcflush = TRUE;
1608 			}
1609 #endif
1610 			pmap->pm_stats.wired_count--;
1611 			pmap->pm_stats.resident_count--;
1612 			*pte = PG_NV;
1613 			TBIS(va);
1614 			pte++;
1615 			va += NBPG;
1616 		}
1617 	}
1618 
1619 #ifdef M68K_MMU_HP
1620 
1621 	/*
1622 	 * In a couple of cases, we don't need to worry about flushing
1623 	 * the VAC:
1624 	 * 	1. if this is a kernel mapping,
1625 	 *	   we have already done it
1626 	 *	2. if it is a user mapping not for the current process,
1627 	 *	   it won't be there
1628 	 */
1629 
1630 	if (pmap_aliasmask && !active_user_pmap(pmap))
1631 		needcflush = FALSE;
1632 	if (needcflush) {
1633 		if (pmap == pmap_kernel()) {
1634 			DCIS();
1635 		} else {
1636 			DCIU();
1637 		}
1638 	}
1639 #endif
1640 }
1641 
1642 /*
1643  * pmap_unwire:			[ INTERFACE ]
1644  *
1645  *	Clear the wired attribute for a map/virtual-address pair.
1646  *
1647  *	The mapping must already exist in the pmap.
1648  */
1649 void
1650 pmap_unwire(pmap, va)
1651 	pmap_t		pmap;
1652 	vaddr_t		va;
1653 {
1654 	pt_entry_t *pte;
1655 
1656 	PMAP_DPRINTF(PDB_FOLLOW,
1657 	    ("pmap_unwire(%p, %lx)\n", pmap, va));
1658 
1659 	pte = pmap_pte(pmap, va);
1660 
1661 	/*
1662 	 * If wiring actually changed (always?) clear the wire bit and
1663 	 * update the wire count.  Note that wiring is not a hardware
1664 	 * characteristic so there is no need to invalidate the TLB.
1665 	 */
1666 
1667 	if (pmap_pte_w_chg(pte, 0)) {
1668 		pmap_pte_set_w(pte, FALSE);
1669 		pmap->pm_stats.wired_count--;
1670 	}
1671 }
1672 
1673 /*
1674  * pmap_extract:		[ INTERFACE ]
1675  *
1676  *	Extract the physical address associated with the given
1677  *	pmap/virtual address pair.
1678  */
1679 boolean_t
1680 pmap_extract(pmap, va, pap)
1681 	pmap_t	pmap;
1682 	vaddr_t va;
1683 	paddr_t *pap;
1684 {
1685 	boolean_t rv = FALSE;
1686 	paddr_t pa;
1687 	u_int pte;
1688 
1689 	PMAP_DPRINTF(PDB_FOLLOW,
1690 	    ("pmap_extract(%p, %lx) -> ", pmap, va));
1691 
1692 	if (pmap_ste_v(pmap, va)) {
1693 		pte = *(u_int *)pmap_pte(pmap, va);
1694 		if (pte) {
1695 			pa = (pte & PG_FRAME) | (va & ~PG_FRAME);
1696 			if (pap != NULL)
1697 				*pap = pa;
1698 			rv = TRUE;
1699 		}
1700 	}
1701 #ifdef DEBUG
1702 	if (pmapdebug & PDB_FOLLOW) {
1703 		if (rv)
1704 			printf("%lx\n", pa);
1705 		else
1706 			printf("failed\n");
1707 	}
1708 #endif
1709 	return (rv);
1710 }
1711 
1712 /*
1713  * pmap_copy:		[ INTERFACE ]
1714  *
1715  *	Copy the mapping range specified by src_addr/len
1716  *	from the source map to the range dst_addr/len
1717  *	in the destination map.
1718  *
1719  *	This routine is only advisory and need not do anything.
1720  */
1721 void
1722 pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
1723 	pmap_t		dst_pmap;
1724 	pmap_t		src_pmap;
1725 	vaddr_t		dst_addr;
1726 	vsize_t		len;
1727 	vaddr_t		src_addr;
1728 {
1729 
1730 	PMAP_DPRINTF(PDB_FOLLOW,
1731 	    ("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
1732 	    dst_pmap, src_pmap, dst_addr, len, src_addr));
1733 }
1734 
1735 /*
1736  * pmap_collect:		[ INTERFACE ]
1737  *
1738  *	Garbage collects the physical map system for pages which are no
1739  *	longer used.  Success need not be guaranteed -- that is, there
1740  *	may well be pages which are not referenced, but others may be
1741  *	collected.
1742  *
1743  *	Called by the pageout daemon when pages are scarce.
1744  */
1745 void
1746 pmap_collect(pmap)
1747 	pmap_t		pmap;
1748 {
1749 
1750 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_collect(%p)\n", pmap));
1751 
1752 	if (pmap == pmap_kernel()) {
1753 		int bank, s;
1754 
1755 		/*
1756 		 * XXX This is very bogus.  We should handle kernel PT
1757 		 * XXX pages much differently.
1758 		 */
1759 
1760 		s = splvm();
1761 		for (bank = 0; bank < vm_nphysseg; bank++)
1762 			pmap_collect1(pmap, ptoa(vm_physmem[bank].start),
1763 			    ptoa(vm_physmem[bank].end));
1764 		splx(s);
1765 	} else {
1766 		/*
1767 		 * This process is about to be swapped out; free all of
1768 		 * the PT pages by removing the physical mappings for its
1769 		 * entire address space.  Note: pmap_remove() performs
1770 		 * all necessary locking.
1771 		 */
1772 		pmap_do_remove(pmap, VM_MIN_ADDRESS, VM_MAX_ADDRESS, 0);
1773 		pmap_update(pmap);
1774 	}
1775 
1776 #ifdef notyet
1777 	/* Go compact and garbage-collect the pv_table. */
1778 	pmap_collect_pv();
1779 #endif
1780 }
1781 
1782 /*
1783  * pmap_collect1():
1784  *
1785  *	Garbage-collect KPT pages.  Helper for the above (bogus)
1786  *	pmap_collect().
1787  *
1788  *	Note: THIS SHOULD GO AWAY, AND BE REPLACED WITH A BETTER
1789  *	WAY OF HANDLING PT PAGES!
1790  */
1791 void
1792 pmap_collect1(pmap, startpa, endpa)
1793 	pmap_t		pmap;
1794 	paddr_t		startpa, endpa;
1795 {
1796 	paddr_t pa;
1797 	struct pv_entry *pv;
1798 	pt_entry_t *pte;
1799 	paddr_t kpa;
1800 #ifdef DEBUG
1801 	st_entry_t *ste;
1802 	int opmapdebug = 0;
1803 #endif
1804 
1805 	for (pa = startpa; pa < endpa; pa += NBPG) {
1806 		struct kpt_page *kpt, **pkpt;
1807 
1808 		/*
1809 		 * Locate physical pages which are being used as kernel
1810 		 * page table pages.
1811 		 */
1812 
1813 		pv = pa_to_pvh(pa);
1814 		if (pv->pv_pmap != pmap_kernel() || !(pv->pv_flags & PV_PTPAGE))
1815 			continue;
1816 		do {
1817 			if (pv->pv_ptste && pv->pv_ptpmap == pmap_kernel())
1818 				break;
1819 		} while ((pv = pv->pv_next));
1820 		if (pv == NULL)
1821 			continue;
1822 #ifdef DEBUG
1823 		if (pv->pv_va < (vaddr_t)Sysmap ||
1824 		    pv->pv_va >= (vaddr_t)Sysmap + M68K_MAX_PTSIZE) {
1825 			printf("collect: kernel PT VA out of range\n");
1826 			pmap_pvdump(pa);
1827 			continue;
1828 		}
1829 #endif
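		/*
		 * Scan the PT page from the end; if any PTE is still
		 * valid, the page is still in use and is skipped.
		 */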
1830 		pte = (pt_entry_t *)(pv->pv_va + NBPG);
1831 		while (--pte >= (pt_entry_t *)pv->pv_va && *pte == PG_NV)
1832 			;
1833 		if (pte >= (pt_entry_t *)pv->pv_va)
1834 			continue;
1835 
1836 #ifdef DEBUG
1837 		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
1838 			printf("collect: freeing KPT page at %lx (ste %x@%p)\n",
1839 			       pv->pv_va, *pv->pv_ptste, pv->pv_ptste);
1840 			opmapdebug = pmapdebug;
1841 			pmapdebug |= PDB_PTPAGE;
1842 		}
1843 
1844 		ste = pv->pv_ptste;
1845 #endif
1846 		/*
1847 		 * If all entries were invalid we can remove the page.
1848 		 * We call pmap_remove_entry to take care of invalidating
 1849 		 * We call pmap_remove_mapping to take care of invalidating
1850 		 */
1851 
1852 		(void) pmap_extract(pmap, pv->pv_va, &kpa);
1853 		pmap_remove_mapping(pmap, pv->pv_va, NULL,
1854 		    PRM_TFLUSH|PRM_CFLUSH);
1855 
1856 		/*
1857 		 * Use the physical address to locate the original
1858 		 * (kmem_alloc assigned) address for the page and put
1859 		 * that page back on the free list.
1860 		 */
1861 
1862 		for (pkpt = &kpt_used_list, kpt = *pkpt;
1863 		     kpt != NULL;
1864 		     pkpt = &kpt->kpt_next, kpt = *pkpt)
1865 			if (kpt->kpt_pa == kpa)
1866 				break;
1867 #ifdef DEBUG
1868 		if (kpt == NULL)
1869 			panic("pmap_collect: lost a KPT page");
1870 		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
1871 			printf("collect: %lx (%lx) to free list\n",
1872 			       kpt->kpt_va, kpa);
1873 #endif
1874 		*pkpt = kpt->kpt_next;
1875 		kpt->kpt_next = kpt_free_list;
1876 		kpt_free_list = kpt;
1877 #ifdef DEBUG
1878 		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
1879 			pmapdebug = opmapdebug;
1880 
1881 		if (*ste != SG_NV)
1882 			printf("collect: kernel STE at %p still valid (%x)\n",
1883 			       ste, *ste);
1884 		ste = &Sysptmap[ste - pmap_ste(pmap_kernel(), 0)];
1885 		if (*ste != SG_NV)
1886 			printf("collect: kernel PTmap at %p still valid (%x)\n",
1887 			       ste, *ste);
1888 #endif
1889 	}
1890 }
1891 
1892 /*
1893  * pmap_zero_page:		[ INTERFACE ]
1894  *
1895  *	Zero the specified (machine independent) page by mapping the page
1896  *	into virtual memory and using memset to clear its contents, one
 1897  *	into virtual memory and using zeropage() to clear its contents, one
1898  *
1899  *	Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
1900  *	      (Actually, we go to splvm(), and since we don't
1901  *	      support multiple processors, this is sufficient.)
1902  */
1903 void
1904 pmap_zero_page(phys)
1905 	paddr_t phys;
1906 {
1907 	int npte;
1908 
1909 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_zero_page(%lx)\n", phys));
1910 
1911 	npte = phys | PG_V;
1912 #ifdef M68K_MMU_HP
1913 	if (pmap_aliasmask) {
1914 
1915 		/*
1916 		 * Cache-inhibit the mapping on VAC machines, as we would
1917 		 * be wasting the cache load.
1918 		 */
1919 
1920 		npte |= PG_CI;
1921 	}
1922 #endif
1923 
1924 #if defined(M68040) || defined(M68060)
1925 #if defined(M68020) || defined(M68030)
1926 	if (mmutype == MMU_68040)
1927 #endif
1928 	{
1929 		/*
1930 		 * Set copyback caching on the page; this is required
1931 		 * for cache consistency (since regular mappings are
1932 		 * copyback as well).
1933 		 */
1934 
1935 		npte |= PG_CCB;
1936 	}
1937 #endif
1938 
1939 	*caddr1_pte = npte;
1940 	TBIS((vaddr_t)CADDR1);
1941 
1942 	zeropage(CADDR1);
1943 
1944 #ifdef DEBUG
1945 	*caddr1_pte = PG_NV;
1946 	TBIS((vaddr_t)CADDR1);
1947 #endif
1948 }
1949 
1950 /*
1951  * pmap_copy_page:		[ INTERFACE ]
1952  *
1953  *	Copy the specified (machine independent) page by mapping the page
 1954  *	into virtual memory and using copypage() to copy the page, one machine
1955  *	dependent page at a time.
1956  *
1957  *	Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
1958  *	      (Actually, we go to splvm(), and since we don't
1959  *	      support multiple processors, this is sufficient.)
1960  */
1961 void
1962 pmap_copy_page(src, dst)
1963 	paddr_t src, dst;
1964 {
1965 	int npte1, npte2;
1966 
1967 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_copy_page(%lx, %lx)\n", src, dst));
1968 
1969 	npte1 = src | PG_RO | PG_V;
1970 	npte2 = dst | PG_V;
1971 #ifdef M68K_MMU_HP
1972 	if (pmap_aliasmask) {
1973 
1974 		/*
1975 		 * Cache-inhibit the mapping on VAC machines, as we would
1976 		 * be wasting the cache load.
1977 		 */
1978 
1979 		npte1 |= PG_CI;
1980 		npte2 |= PG_CI;
1981 	}
1982 #endif
1983 
1984 #if defined(M68040) || defined(M68060)
1985 #if defined(M68020) || defined(M68030)
1986 	if (mmutype == MMU_68040)
1987 #endif
1988 	{
1989 		/*
1990 		 * Set copyback caching on the pages; this is required
1991 		 * for cache consistency (since regular mappings are
1992 		 * copyback as well).
1993 		 */
1994 
1995 		npte1 |= PG_CCB;
1996 		npte2 |= PG_CCB;
1997 	}
1998 #endif
1999 
2000 	*caddr1_pte = npte1;
2001 	TBIS((vaddr_t)CADDR1);
2002 
2003 	*caddr2_pte = npte2;
2004 	TBIS((vaddr_t)CADDR2);
2005 
2006 	copypage(CADDR1, CADDR2);
2007 
2008 #ifdef DEBUG
2009 	*caddr1_pte = PG_NV;
2010 	TBIS((vaddr_t)CADDR1);
2011 
2012 	*caddr2_pte = PG_NV;
2013 	TBIS((vaddr_t)CADDR2);
2014 #endif
2015 }
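
/*
 * Both pmap_zero_page() and pmap_copy_page() above use the same trick:
 * the physical page(s) are temporarily mapped at the reserved kernel
 * virtual addresses CADDR1/CADDR2 by writing the PTE directly and
 * flushing the stale TLB entry with TBIS(), the operation is performed
 * through that window, and (under DEBUG) the window is invalidated again.
 * The source window of pmap_copy_page() is mapped read-only (PG_RO) since
 * it is only ever read; on VAC machines the windows are cache-inhibited,
 * and on the '040/'060 they are made copyback to match regular mappings.
 */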
2016 
2017 /*
2018  * pmap_clear_modify:		[ INTERFACE ]
2019  *
2020  *	Clear the modify bits on the specified physical page.
2021  */
2022 boolean_t
2023 pmap_clear_modify(pg)
2024 	struct vm_page *pg;
2025 {
2026 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
2027 
2028 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_modify(%p)\n", pg));
2029 
2030 	return pmap_changebit(pa, 0, ~PG_M);
2031 }
2032 
2033 /*
2034  * pmap_clear_reference:	[ INTERFACE ]
2035  *
2036  *	Clear the reference bit on the specified physical page.
2037  */
2038 boolean_t
2039 pmap_clear_reference(pg)
2040 	struct vm_page *pg;
2041 {
2042 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
2043 
2044 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_reference(%p)\n", pg));
2045 
2046 	return pmap_changebit(pa, 0, ~PG_U);
2047 }
2048 
2049 /*
2050  * pmap_is_referenced:		[ INTERFACE ]
2051  *
2052  *	Return whether or not the specified physical page is referenced
2053  *	by any physical maps.
2054  */
2055 boolean_t
2056 pmap_is_referenced(pg)
2057 	struct vm_page *pg;
2058 {
2059 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
2060 
2061 	return (pmap_testbit(pa, PG_U));
2062 }
2063 
2064 /*
2065  * pmap_is_modified:		[ INTERFACE ]
2066  *
2067  *	Return whether or not the specified physical page is modified
2068  *	by any physical maps.
2069  */
2070 boolean_t
2071 pmap_is_modified(pg)
2072 	struct vm_page *pg;
2073 {
2074 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
2075 
2076 	return (pmap_testbit(pa, PG_M));
2077 }
2078 
2079 /*
2080  * pmap_phys_address:		[ INTERFACE ]
2081  *
2082  *	Return the physical address corresponding to the specified
2083  *	cookie.  Used by the device pager to decode a device driver's
2084  *	mmap entry point return value.
2085  *
2086  *	Note: no locking is necessary in this function.
2087  */
2088 paddr_t
2089 pmap_phys_address(ppn)
2090 	int ppn;
2091 {
2092 	return(m68k_ptob(ppn));
2093 }
2094 
2095 #ifdef M68K_MMU_HP
2096 /*
2097  * pmap_prefer:			[ INTERFACE ]
2098  *
2099  *	Find the first virtual address >= *vap that does not
2100  *	cause a virtually-addressed cache alias problem.
2101  */
2102 void
2103 pmap_prefer(foff, vap)
2104 	vaddr_t foff, *vap;
2105 {
2106 	vaddr_t va;
2107 	vsize_t d;
2108 
2109 #ifdef M68K_MMU_MOTOROLA
2110 	if (pmap_aliasmask)
2111 #endif
2112 	{
2113 		va = *vap;
2114 		d = foff - va;
2115 		d &= pmap_aliasmask;
2116 		*vap = va + d;
2117 	}
2118 }
2119 #endif /* M68K_MMU_HP */
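
/*
 * The arithmetic in pmap_prefer() rounds the candidate VA up to the next
 * address whose alias bits match those of the file offset: with an alias
 * mask of (2^k - 1), d = (foff - va) & mask is the smallest non-negative
 * displacement that makes (va + d) congruent to foff modulo 2^k.  A
 * self-contained sketch with a made-up 64KB alias span (the mask and
 * addresses below are illustrative only):
 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned long mask = 0xffffUL;		/* hypothetical alias mask */
	unsigned long foff = 0x00012340UL;	/* file offset */
	unsigned long va   = 0x20005000UL;	/* candidate virtual address */
	unsigned long d    = (foff - va) & mask;

	/* Prints 0x20012340: low 16 bits equal foff's, and >= va. */
	printf("preferred va = 0x%lx\n", va + d);
	return (0);
}
#endif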
2120 
2121 #ifdef COMPAT_HPUX
2122 /*
2123  * pmap_mapmulti:
2124  *
2125  *	HP-UX hack for dealing with the so-called multi-mapped address space:
2126  *	the first 256MB is mapped in at every 256MB region from 0x10000000
2127  *	up to 0xF0000000, allowing 15 distinct tag values in the upper bits.
2128  *
2129  *	We implement this at the segment table level, the machine independent
2130  *	VM knows nothing about it.
2131  */
2132 int
2133 pmap_mapmulti(pmap, va)
2134 	struct pmap *pmap;
2135 	vaddr_t va;
2136 {
2137 	st_entry_t *ste, *bste;
2138 
2139 #ifdef DEBUG
2140 	if (pmapdebug & PDB_MULTIMAP) {
2141 		ste = pmap_ste(pmap, HPMMBASEADDR(va));
2142 		printf("pmap_mapmulti(%p, %lx): bste %p(%x)",
2143 		       pmap, va, ste, *ste);
2144 		ste = pmap_ste(pmap, va);
2145 		printf(" ste %p(%x)\n", ste, *ste);
2146 	}
2147 #endif
2148 	bste = pmap_ste(pmap, HPMMBASEADDR(va));
2149 	ste = pmap_ste(pmap, va);
2150 	if (*ste == SG_NV && (*bste & SG_V)) {
2151 		*ste = *bste;
2152 		TBIAU();
2153 		return 0;
2154 	}
2155 	return EFAULT;
2156 }
2157 #endif /* COMPAT_HPUX */
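
/*
 * A worked example (assuming HPMMBASEADDR() simply strips the upper
 * 256MB-region "tag" bits): a fault at va 0x50001000 has base address
 * 0x00001000 and tag 0x5.  If the segment table entry covering the base
 * address is valid, pmap_mapmulti() copies it into the entry covering
 * the faulting va and flushes the user TLB, so the aliased region shares
 * the same page tables as the first 256MB.
 */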
2158 
2159 /*
2160  * Miscellaneous support routines follow
2161  */
2162 
2163 /*
2164  * pmap_remove_mapping:
2165  *
2166  *	Invalidate a single page denoted by pmap/va.
2167  *
2168  *	If (pte != NULL), it is the already computed PTE for the page.
2169  *
2170  *	If (flags & PRM_TFLUSH), we must invalidate any TLB information.
2171  *
2172  *	If (flags & PRM_CFLUSH), we must flush/invalidate any cache
2173  *	information.
2174  *
2175  *	If (flags & PRM_KEEPPTPAGE), we don't free the page table page
2176  *	if the reference drops to zero.
2177  */
2178 /* static */
2179 void
2180 pmap_remove_mapping(pmap, va, pte, flags)
2181 	struct pmap *pmap;
2182 	vaddr_t va;
2183 	pt_entry_t *pte;
2184 	int flags;
2185 {
2186 	paddr_t pa;
2187 	struct pv_entry *pv, *npv;
2188 	struct pmap *ptpmap;
2189 	st_entry_t *ste;
2190 	int s, bits;
2191 #ifdef DEBUG
2192 	pt_entry_t opte;
2193 #endif
2194 
2195 	PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
2196 	    ("pmap_remove_mapping(%p, %lx, %p, %x)\n",
2197 	    pmap, va, pte, flags));
2198 
2199 	/*
2200 	 * PTE not provided, compute it from pmap and va.
2201 	 */
2202 
2203 	if (pte == NULL) {
2204 		pte = pmap_pte(pmap, va);
2205 		if (*pte == PG_NV)
2206 			return;
2207 	}
2208 
2209 #ifdef M68K_MMU_HP
2210 	if (pmap_aliasmask && (flags & PRM_CFLUSH)) {
2211 
2212 		/*
2213 		 * Purge kernel side of VAC to ensure we get the correct
2214 		 * state of any hardware maintained bits.
2215 		 */
2216 
2217 		DCIS();
2218 
2219 		/*
2220 		 * If this is a non-CI user mapping for the current process,
2221 		 * flush the VAC.  Note that the kernel side was flushed
2222 		 * above so we don't worry about non-CI kernel mappings.
2223 		 */
2224 
2225 		if (active_user_pmap(pmap) && !pmap_pte_ci(pte)) {
2226 			DCIU();
2227 		}
2228 	}
2229 #endif
2230 
2231 	pa = pmap_pte_pa(pte);
2232 #ifdef DEBUG
2233 	opte = *pte;
2234 #endif
2235 
2236 	/*
2237 	 * Update statistics
2238 	 */
2239 
2240 	if (pmap_pte_w(pte))
2241 		pmap->pm_stats.wired_count--;
2242 	pmap->pm_stats.resident_count--;
2243 
2244 #if defined(M68040) || defined(M68060)
2245 #if defined(M68020) || defined(M68030)
2246 	if (mmutype == MMU_68040)
2247 #endif
2248 	if ((flags & PRM_CFLUSH)) {
2249 		DCFP(pa);
2250 		ICPP(pa);
2251 	}
2252 #endif
2253 
2254 	/*
2255 	 * Invalidate the PTE after saving the reference modify info.
2256 	 */
2257 
2258 	PMAP_DPRINTF(PDB_REMOVE, ("remove: invalidating pte at %p\n", pte));
2259 	bits = *pte & (PG_U|PG_M);
2260 	*pte = PG_NV;
2261 	if ((flags & PRM_TFLUSH) && active_pmap(pmap))
2262 		TBIS(va);
2263 
2264 	/*
2265 	 * For user mappings decrement the wiring count on
2266 	 * the PT page.
2267 	 */
2268 
2269 	if (pmap != pmap_kernel()) {
2270 		vaddr_t ptpva = trunc_page((vaddr_t)pte);
2271 		int refs = pmap_ptpage_delref(ptpva);
2272 #ifdef DEBUG
2273 		if (pmapdebug & PDB_WIRING)
2274 			pmap_check_wiring("remove", ptpva);
2275 #endif
2276 
2277 		/*
2278 		 * If reference count drops to 0, and we're not instructed
2279 		 * to keep it around, free the PT page.
2280 		 */
2281 
2282 		if (refs == 0 && (flags & PRM_KEEPPTPAGE) == 0) {
2283 #ifdef DIAGNOSTIC
2284 			struct pv_entry *pv;
2285 #endif
2286 			paddr_t pa;
2287 
2288 			pa = pmap_pte_pa(pmap_pte(pmap_kernel(), ptpva));
2289 #ifdef DIAGNOSTIC
2290 			if (PAGE_IS_MANAGED(pa) == 0)
2291 				panic("pmap_remove_mapping: unmanaged PT page");
2292 			pv = pa_to_pvh(pa);
2293 			if (pv->pv_ptste == NULL)
2294 				panic("pmap_remove_mapping: ptste == NULL");
2295 			if (pv->pv_pmap != pmap_kernel() ||
2296 			    pv->pv_va != ptpva ||
2297 			    pv->pv_next != NULL)
2298 				panic("pmap_remove_mapping: "
2299 				    "bad PT page pmap %p, va 0x%lx, next %p",
2300 				    pv->pv_pmap, pv->pv_va, pv->pv_next);
2301 #endif
2302 			pmap_remove_mapping(pmap_kernel(), ptpva,
2303 			    NULL, PRM_TFLUSH|PRM_CFLUSH);
2304 			uvm_pagefree(PHYS_TO_VM_PAGE(pa));
2305 			PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
2306 			    ("remove: PT page 0x%lx (0x%lx) freed\n",
2307 			    ptpva, pa));
2308 		}
2309 	}
2310 
2311 	/*
2312 	 * If this isn't a managed page, we are all done.
2313 	 */
2314 
2315 	if (PAGE_IS_MANAGED(pa) == 0)
2316 		return;
2317 
2318 	/*
2319 	 * Otherwise remove it from the PV table
2320 	 * (raise IPL since we may be called at interrupt time).
2321 	 */
2322 
2323 	pv = pa_to_pvh(pa);
2324 	ste = NULL;
2325 	s = splvm();
2326 
2327 	/*
2328 	 * If it is the first entry on the list, it is actually
2329 	 * in the header and we must copy the following entry up
2330 	 * to the header.  Otherwise we must search the list for
2331 	 * the entry.  In either case we free the now unused entry.
2332 	 */
2333 
2334 	if (pmap == pv->pv_pmap && va == pv->pv_va) {
2335 		ste = pv->pv_ptste;
2336 		ptpmap = pv->pv_ptpmap;
2337 		npv = pv->pv_next;
2338 		if (npv) {
2339 			npv->pv_flags = pv->pv_flags;
2340 			*pv = *npv;
2341 			pmap_free_pv(npv);
2342 		} else
2343 			pv->pv_pmap = NULL;
2344 	} else {
2345 		for (npv = pv->pv_next; npv; npv = npv->pv_next) {
2346 			if (pmap == npv->pv_pmap && va == npv->pv_va)
2347 				break;
2348 			pv = npv;
2349 		}
2350 #ifdef DEBUG
2351 		if (npv == NULL)
2352 			panic("pmap_remove_mapping: PA not in pv_tab");
2353 #endif
2354 		ste = npv->pv_ptste;
2355 		ptpmap = npv->pv_ptpmap;
2356 		pv->pv_next = npv->pv_next;
2357 		pmap_free_pv(npv);
2358 		pv = pa_to_pvh(pa);
2359 	}
2360 
2361 #ifdef M68K_MMU_HP
2362 
2363 	/*
2364 	 * If only one mapping is left, we no longer need to cache-inhibit it
2365 	 */
2366 
2367 	if (pmap_aliasmask &&
2368 	    pv->pv_pmap && pv->pv_next == NULL && (pv->pv_flags & PV_CI)) {
2369 		PMAP_DPRINTF(PDB_CACHE,
2370 		    ("remove: clearing CI for pa %lx\n", pa));
2371 		pv->pv_flags &= ~PV_CI;
2372 		pmap_changebit(pa, 0, ~PG_CI);
2373 #ifdef DEBUG
2374 		if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
2375 		    (PDB_CACHE|PDB_PVDUMP))
2376 			pmap_pvdump(pa);
2377 #endif
2378 	}
2379 #endif
2380 
2381 	/*
2382 	 * If this was a PT page we must also remove the
2383 	 * mapping from the associated segment table.
2384 	 */
2385 
2386 	if (ste) {
2387 		PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
2388 		    ("remove: ste was %x@%p pte was %x@%p\n",
2389 		    *ste, ste, opte, pmap_pte(pmap, va)));
2390 #if defined(M68040) || defined(M68060)
2391 #if defined(M68020) || defined(M68030)
2392 		if (mmutype == MMU_68040)
2393 #endif
2394 		{
2395 			st_entry_t *este = &ste[NPTEPG/SG4_LEV3SIZE];
2396 
2397 			while (ste < este)
2398 				*ste++ = SG_NV;
2399 #ifdef DEBUG
2400 			ste -= NPTEPG/SG4_LEV3SIZE;
2401 #endif
2402 		}
2403 #if defined(M68020) || defined(M68030)
2404 		else
2405 #endif
2406 #endif
2407 #if defined(M68020) || defined(M68030)
2408 		*ste = SG_NV;
2409 #endif
2410 
2411 		/*
2412 		 * If it was a user PT page, we decrement the
2413 		 * reference count on the segment table as well,
2414 		 * freeing it if it is now empty.
2415 		 */
2416 
2417 		if (ptpmap != pmap_kernel()) {
2418 			PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
2419 			    ("remove: stab %p, refcnt %d\n",
2420 			    ptpmap->pm_stab, ptpmap->pm_sref - 1));
2421 #ifdef DEBUG
2422 			if ((pmapdebug & PDB_PARANOIA) &&
2423 			    ptpmap->pm_stab !=
2424 			     (st_entry_t *)trunc_page((vaddr_t)ste))
2425 				panic("remove: bogus ste");
2426 #endif
2427 			if (--(ptpmap->pm_sref) == 0) {
2428 				PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
2429 				    ("remove: free stab %p\n",
2430 				    ptpmap->pm_stab));
2431 				pmap_remove(pmap_kernel(),
2432 				    (vaddr_t)ptpmap->pm_stab,
2433 				    (vaddr_t)ptpmap->pm_stab + M68K_STSIZE);
2434 				uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t)
2435 							     ptpmap->pm_stpa));
2436 				uvm_km_free_wakeup(st_map,
2437 						 (vaddr_t)ptpmap->pm_stab,
2438 						 M68K_STSIZE);
2439 				ptpmap->pm_stab = Segtabzero;
2440 				ptpmap->pm_stpa = Segtabzeropa;
2441 #if defined(M68040) || defined(M68060)
2442 #if defined(M68020) || defined(M68030)
2443 				if (mmutype == MMU_68040)
2444 #endif
2445 					ptpmap->pm_stfree = protostfree;
2446 #endif
2447 
2448 				/*
2449 				 * XXX may have changed segment table
2450 				 * pointer for current process so
2451 				 * update now to reload hardware.
2452 				 */
2453 
2454 				if (active_user_pmap(ptpmap))
2455 					PMAP_ACTIVATE(ptpmap, 1);
2456 			}
2457 		}
2458 		pv->pv_flags &= ~PV_PTPAGE;
2459 		ptpmap->pm_ptpages--;
2460 	}
2461 
2462 	/*
2463 	 * Update saved attributes for managed page
2464 	 */
2465 
2466 	*pa_to_attribute(pa) |= bits;
2467 	splx(s);
2468 }
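
/*
 * The PV-list removal above exploits the fact that the first pv_entry
 * for a page is embedded in the per-page header returned by pa_to_pvh():
 * removing the first mapping copies the second entry up into the header
 * rather than unlinking a node.  An illustrative, self-contained sketch
 * of that idiom (toy types, not the real pv_entry):
 */
#if 0
#include <stdlib.h>

struct toy_pv {
	struct toy_pv *next;
	int owner;			/* stands in for (pmap, va) */
};

/* Remove the entry for `owner' from a list whose head is embedded. */
static void
toy_pv_remove(struct toy_pv *head, int owner)
{
	struct toy_pv *pv, *npv;

	if (head->owner == owner) {
		npv = head->next;
		if (npv != NULL) {
			*head = *npv;	/* copy second entry into header */
			free(npv);
		} else
			head->owner = -1;	/* header now marks "no mappings" */
	} else {
		for (pv = head, npv = head->next; npv != NULL;
		    pv = npv, npv = npv->next)
			if (npv->owner == owner)
				break;
		if (npv != NULL) {
			pv->next = npv->next;
			free(npv);
		}
	}
}
#endif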
2469 
2470 /*
2471  * pmap_testbit:
2472  *
2473  *	Test the modified/referenced bits of a physical page.
2474  */
2475 /* static */
2476 boolean_t
2477 pmap_testbit(pa, bit)
2478 	paddr_t pa;
2479 	int bit;
2480 {
2481 	struct pv_entry *pv;
2482 	pt_entry_t *pte;
2483 	int s;
2484 
2485 	pv = pa_to_pvh(pa);
2486 	s = splvm();
2487 
2488 	/*
2489 	 * Check saved info first
2490 	 */
2491 
2492 	if (*pa_to_attribute(pa) & bit) {
2493 		splx(s);
2494 		return(TRUE);
2495 	}
2496 
2497 #ifdef M68K_MMU_HP
2498 
2499 	/*
2500 	 * Flush VAC to get correct state of any hardware maintained bits.
2501 	 */
2502 
2503 	if (pmap_aliasmask && (bit & (PG_U|PG_M)))
2504 		DCIS();
2505 #endif
2506 
2507 	/*
2508 	 * Not found.  Check current mappings, returning immediately if
2509 	 * found.  Cache a hit to speed future lookups.
2510 	 */
2511 
2512 	if (pv->pv_pmap != NULL) {
2513 		for (; pv; pv = pv->pv_next) {
2514 			pte = pmap_pte(pv->pv_pmap, pv->pv_va);
2515 			if (*pte & bit) {
2516 				*pa_to_attribute(pa) |= bit;
2517 				splx(s);
2518 				return (TRUE);
2519 			}
2520 		}
2521 	}
2522 	splx(s);
2523 	return (FALSE);
2524 }
2525 
2526 /*
2527  * pmap_changebit:
2528  *
2529  *	Change the modified/referenced bits, or other PTE bits,
2530  *	for a physical page.
2531  */
2532 /* static */
2533 boolean_t
2534 pmap_changebit(pa, set, mask)
2535 	paddr_t pa;
2536 	int set, mask;
2537 {
2538 	struct pv_entry *pv;
2539 	pt_entry_t *pte, npte;
2540 	vaddr_t va;
2541 	char *attrp;
2542 	int s;
2543 #if defined(M68K_MMU_HP) || defined(M68040) || defined(M68060)
2544 	boolean_t firstpage = TRUE;
2545 #endif
2546 	boolean_t r;
2547 
2548 	PMAP_DPRINTF(PDB_BITS,
2549 	    ("pmap_changebit(%lx, %x, %x)\n", pa, set, mask));
2550 
2551 	pv = pa_to_pvh(pa);
2552 	s = splvm();
2553 
2554 	/*
2555 	 * Clear saved attributes (modify, reference)
2556 	 */
2557 
2558 	attrp = pa_to_attribute(pa);
2559 	r = *attrp & ~mask;
2560 	*attrp &= mask;
2561 
2562 	/*
2563 	 * Loop over all current mappings, setting/clearing as appropriate.
2564 	 * If setting RO, do we need to clear the VAC?
2565 	 */
2566 
2567 	if (pv->pv_pmap != NULL) {
2568 #ifdef DEBUG
2569 		int toflush = 0;
2570 #endif
2571 		for (; pv; pv = pv->pv_next) {
2572 #ifdef DEBUG
2573 			toflush |= (pv->pv_pmap == pmap_kernel()) ? 2 : 1;
2574 #endif
2575 			va = pv->pv_va;
2576 			pte = pmap_pte(pv->pv_pmap, va);
2577 #ifdef M68K_MMU_HP
2578 
2579 			/*
2580 			 * Flush VAC to ensure we get correct state of HW bits
2581 			 * so we don't clobber them.
2582 			 */
2583 
2584 			if (firstpage && pmap_aliasmask) {
2585 				firstpage = FALSE;
2586 				DCIS();
2587 			}
2588 #endif
2589 			npte = (*pte | set) & mask;
2590 			if (*pte != npte) {
2591 				r = TRUE;
2592 #if defined(M68040) || defined(M68060)
2593 				/*
2594 				 * If we are changing caching status or
2595 				 * protection make sure the caches are
2596 				 * flushed (but only once).
2597 				 */
2598 				if (firstpage &&
2599 #if defined(M68020) || defined(M68030)
2600 				    (mmutype == MMU_68040) &&
2601 #endif
2602 				    ((set == PG_RO) ||
2603 				     (set & PG_CMASK) ||
2604 				     (mask & PG_CMASK) == 0)) {
2605 					firstpage = FALSE;
2606 					DCFP(pa);
2607 					ICPP(pa);
2608 				}
2609 #endif
2610 				*pte = npte;
2611 				if (active_pmap(pv->pv_pmap))
2612 					TBIS(va);
2613 			}
2614 		}
2615 	}
2616 	splx(s);
2617 	return(r);
2618 }
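
/*
 * Callers combine `set' and `mask' to edit the PTEs of every mapping of
 * the page: pmap_changebit(pa, 0, ~PG_M) clears the modify bit (see
 * pmap_clear_modify()), while pmap_changebit(pa, PG_CI, ~PG_CCB) makes a
 * page cache-inhibited and clears copyback caching, as done for '040
 * page table pages in pmap_enter_ptpage().  The return value is true if
 * the saved attributes had any of the bits being cleared set, or if any
 * live PTE was actually changed.
 */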
2619 
2620 /*
2621  * pmap_enter_ptpage:
2622  *
2623  *	Allocate and map a PT page for the specified pmap/va pair.
2624  */
2625 /* static */
2626 void
2627 pmap_enter_ptpage(pmap, va)
2628 	struct pmap *pmap;
2629 	vaddr_t va;
2630 {
2631 	paddr_t ptpa;
2632 	struct vm_page *pg;
2633 	struct pv_entry *pv;
2634 	st_entry_t *ste;
2635 	int s;
2636 
2637 	PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE,
2638 	    ("pmap_enter_ptpage: pmap %p, va %lx\n", pmap, va));
2639 
2640 	/*
2641 	 * Allocate a segment table if necessary.  Note that it is allocated
2642 	 * from a private map and not pt_map.  This keeps user page tables
2643 	 * aligned on segment boundaries in the kernel address space.
2644 	 * The segment table is wired down.  It will be freed whenever the
2645 	 * reference count drops to zero.
2646 	 */
2647 	if (pmap->pm_stab == Segtabzero) {
2648 		pmap->pm_stab = (st_entry_t *)
2649 			uvm_km_zalloc(st_map, M68K_STSIZE);
2650 		(void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_stab,
2651 		    (paddr_t *)&pmap->pm_stpa);
2652 #if defined(M68040) || defined(M68060)
2653 #if defined(M68020) || defined(M68030)
2654 		if (mmutype == MMU_68040)
2655 #endif
2656 		{
2657 			if (pmap_changebit((paddr_t)pmap->pm_stpa,
2658 					   PG_CI, ~PG_CCB))
2659 				DCIS();
2660 			pmap->pm_stfree = protostfree;
2661 		}
2662 #endif
2663 		/*
2664 		 * XXX may have changed segment table pointer for current
2665 		 * process so update now to reload hardware.
2666 		 */
2667 		if (active_user_pmap(pmap))
2668 			PMAP_ACTIVATE(pmap, 1);
2669 
2670 		PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
2671 		    ("enter: pmap %p stab %p(%p)\n",
2672 		    pmap, pmap->pm_stab, pmap->pm_stpa));
2673 	}
2674 
2675 	ste = pmap_ste(pmap, va);
2676 #if defined(M68040) || defined(M68060)
2677 	/*
2678 	 * Allocate level 2 descriptor block if necessary
2679 	 */
2680 #if defined(M68020) || defined(M68030)
2681 	if (mmutype == MMU_68040)
2682 #endif
2683 	{
2684 		if (*ste == SG_NV) {
2685 			int ix;
2686 			caddr_t addr;
2687 
2688 			ix = bmtol2(pmap->pm_stfree);
2689 			if (ix == -1)
2690 				panic("enter: out of address space"); /* XXX */
2691 			pmap->pm_stfree &= ~l2tobm(ix);
2692 			addr = (caddr_t)&pmap->pm_stab[ix*SG4_LEV2SIZE];
2693 			memset(addr, 0, SG4_LEV2SIZE*sizeof(st_entry_t));
2694 			addr = (caddr_t)&pmap->pm_stpa[ix*SG4_LEV2SIZE];
2695 			*ste = (u_int)addr | SG_RW | SG_U | SG_V;
2696 
2697 			PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
2698 			    ("enter: alloc ste2 %d(%p)\n", ix, addr));
2699 		}
2700 		ste = pmap_ste2(pmap, va);
2701 		/*
2702 		 * Since a level 2 descriptor maps a block of SG4_LEV3SIZE
2703 		 * level 3 descriptors, we need a chunk of NPTEPG/SG4_LEV3SIZE
2704 		 * (16) such descriptors (NBPG/SG4_LEV3SIZE bytes) to map a
2705 		 * PT page--the unit of allocation.  We set `ste' to point
2706 		 * to the first entry of that chunk which is validated in its
2707 		 * entirety below.
2708 		 */
2709 		ste = (st_entry_t *)((int)ste & ~(NBPG/SG4_LEV3SIZE-1));
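		/*
		 * With the usual 4KB NBPG and 4-byte descriptors (an
		 * assumption made here only for the arithmetic), the mask
		 * is ~(4096/64 - 1) = ~63, so `ste' is rounded down to a
		 * 64-byte boundary, i.e. to the first of the 16 level 3
		 * descriptors covering one PT page.
		 */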
2710 
2711 		PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
2712 		    ("enter: ste2 %p (%p)\n", pmap_ste2(pmap, va), ste));
2713 	}
2714 #endif
2715 	va = trunc_page((vaddr_t)pmap_pte(pmap, va));
2716 
2717 	/*
2718 	 * In the kernel we allocate a page from the kernel PT page
2719 	 * free list and map it into the kernel page table map (via
2720 	 * pmap_enter).
2721 	 */
2722 	if (pmap == pmap_kernel()) {
2723 		struct kpt_page *kpt;
2724 
2725 		s = splvm();
2726 		if ((kpt = kpt_free_list) == NULL) {
2727 			/*
2728 			 * No PT pages available.
2729 			 * Try once to free up unused ones.
2730 			 */
2731 			PMAP_DPRINTF(PDB_COLLECT,
2732 			    ("enter: no KPT pages, collecting...\n"));
2733 			pmap_collect(pmap_kernel());
2734 			if ((kpt = kpt_free_list) == NULL)
2735 				panic("pmap_enter_ptpage: can't get KPT page");
2736 		}
2737 		kpt_free_list = kpt->kpt_next;
2738 		kpt->kpt_next = kpt_used_list;
2739 		kpt_used_list = kpt;
2740 		ptpa = kpt->kpt_pa;
2741 		memset((caddr_t)kpt->kpt_va, 0, NBPG);
2742 		pmap_enter(pmap, va, ptpa, VM_PROT_READ | VM_PROT_WRITE,
2743 		    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
2744 		pmap_update(pmap);
2745 #ifdef DEBUG
2746 		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE)) {
2747 			int ix = pmap_ste(pmap, va) - pmap_ste(pmap, 0);
2748 
2749 			printf("enter: add &Sysptmap[%d]: %x (KPT page %lx)\n",
2750 			       ix, Sysptmap[ix], kpt->kpt_va);
2751 		}
2752 #endif
2753 		splx(s);
2754 	} else {
2755 
2756 		/*
2757 		 * For user processes we just allocate a page from the
2758 		 * VM system.  Note that we set the page "wired" count to 1,
2759 		 * which is what we use to check if the page can be freed.
2760 		 * See pmap_remove_mapping().
2761 		 *
2762 		 * Count the segment table reference first so that we won't
2763 		 * lose the segment table when low on memory.
2764 		 */
2765 
2766 		pmap->pm_sref++;
2767 		PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
2768 		    ("enter: about to alloc UPT pg at %lx\n", va));
2769 		while ((pg = uvm_pagealloc(uvm.kernel_object,
2770 					   va - vm_map_min(kernel_map),
2771 					   NULL, UVM_PGA_ZERO)) == NULL) {
2772 			uvm_wait("ptpage");
2773 		}
2774 		pg->flags &= ~(PG_BUSY|PG_FAKE);
2775 		UVM_PAGE_OWN(pg, NULL);
2776 		ptpa = VM_PAGE_TO_PHYS(pg);
2777 		pmap_enter(pmap_kernel(), va, ptpa,
2778 		    VM_PROT_READ | VM_PROT_WRITE,
2779 		    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
2780 		pmap_update(pmap_kernel());
2781 	}
2782 #if defined(M68040) || defined(M68060)
2783 	/*
2784 	 * Turn off copyback caching of page table pages,
2785 	 * could get ugly otherwise.
2786 	 */
2787 #if defined(M68020) || defined(M68030)
2788 	if (mmutype == MMU_68040)
2789 #endif
2790 	{
2791 #ifdef DEBUG
2792 		pt_entry_t *pte = pmap_pte(pmap_kernel(), va);
2793 		if ((pmapdebug & PDB_PARANOIA) && (*pte & PG_CCB) == 0)
2794 			printf("%s PT no CCB: kva=%lx ptpa=%lx pte@%p=%x\n",
2795 			       pmap == pmap_kernel() ? "Kernel" : "User",
2796 			       va, ptpa, pte, *pte);
2797 #endif
2798 		if (pmap_changebit(ptpa, PG_CI, ~PG_CCB))
2799 			DCIS();
2800 	}
2801 #endif
2802 	/*
2803 	 * Locate the PV entry in the kernel for this PT page and
2804 	 * record the STE address.  This is so that we can invalidate
2805 	 * the STE when we remove the mapping for the page.
2806 	 */
2807 	pv = pa_to_pvh(ptpa);
2808 	s = splvm();
2809 	if (pv) {
2810 		pv->pv_flags |= PV_PTPAGE;
2811 		do {
2812 			if (pv->pv_pmap == pmap_kernel() && pv->pv_va == va)
2813 				break;
2814 		} while ((pv = pv->pv_next));
2815 	}
2816 #ifdef DEBUG
2817 	if (pv == NULL)
2818 		panic("pmap_enter_ptpage: PT page not entered");
2819 #endif
2820 	pv->pv_ptste = ste;
2821 	pv->pv_ptpmap = pmap;
2822 
2823 	PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
2824 	    ("enter: new PT page at PA %lx, ste at %p\n", ptpa, ste));
2825 
2826 	/*
2827 	 * Map the new PT page into the segment table.
2828 	 * Also increment the reference count on the segment table if this
2829 	 * was a user page table page.  Note that we don't use vm_map_pageable
2830  *	to keep the count like we do for PT pages; this is mostly because
2831 	 * it would be difficult to identify ST pages in pmap_pageable to
2832 	 * release them.  We also avoid the overhead of vm_map_pageable.
2833 	 */
2834 #if defined(M68040) || defined(M68060)
2835 #if defined(M68020) || defined(M68030)
2836 	if (mmutype == MMU_68040)
2837 #endif
2838 	{
2839 		st_entry_t *este;
2840 
2841 		for (este = &ste[NPTEPG/SG4_LEV3SIZE]; ste < este; ste++) {
2842 			*ste = ptpa | SG_U | SG_RW | SG_V;
2843 			ptpa += SG4_LEV3SIZE * sizeof(st_entry_t);
2844 		}
2845 	}
2846 #if defined(M68020) || defined(M68030)
2847 	else
2848 		*ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
2849 #endif
2850 #else
2851 	*ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
2852 #endif
2853 	if (pmap != pmap_kernel()) {
2854 		PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
2855 		    ("enter: stab %p refcnt %d\n",
2856 		    pmap->pm_stab, pmap->pm_sref));
2857 	}
2858 	/*
2859 	 * Flush stale TLB info.
2860 	 */
2861 	if (pmap == pmap_kernel())
2862 		TBIAS();
2863 	else
2864 		TBIAU();
2865 	pmap->pm_ptpages++;
2866 	splx(s);
2867 }
2868 
2869 /*
2870  * pmap_ptpage_addref:
2871  *
2872  *	Add a reference to the specified PT page.
2873  */
2874 void
2875 pmap_ptpage_addref(ptpva)
2876 	vaddr_t ptpva;
2877 {
2878 	struct vm_page *pg;
2879 
2880 	simple_lock(&uvm.kernel_object->vmobjlock);
2881 	pg = uvm_pagelookup(uvm.kernel_object, ptpva - vm_map_min(kernel_map));
2882 	pg->wire_count++;
2883 	PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
2884 	    ("ptpage addref: pg %p now %d\n",
2885 	     pg, pg->wire_count));
2886 	simple_unlock(&uvm.kernel_object->vmobjlock);
2887 }
2888 
2889 /*
2890  * pmap_ptpage_delref:
2891  *
2892  *	Delete a reference to the specified PT page.
2893  */
2894 int
2895 pmap_ptpage_delref(ptpva)
2896 	vaddr_t ptpva;
2897 {
2898 	struct vm_page *pg;
2899 	int rv;
2900 
2901 	simple_lock(&uvm.kernel_object->vmobjlock);
2902 	pg = uvm_pagelookup(uvm.kernel_object, ptpva - vm_map_min(kernel_map));
2903 	rv = --pg->wire_count;
2904 	PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
2905 	    ("ptpage delref: pg %p now %d\n",
2906 	     pg, pg->wire_count));
2907 	simple_unlock(&uvm.kernel_object->vmobjlock);
2908 	return (rv);
2909 }
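
/*
 * The wire count of the underlying VM page doubles as the PT page's
 * reference count: pmap_ptpage_addref() bumps it when a mapping is
 * entered through the page, and pmap_remove_mapping() drops it via
 * pmap_ptpage_delref(), freeing the PT page once the count reaches zero
 * (unless PRM_KEEPPTPAGE was requested).
 */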
2910 
2911 /*
2912  *	Routine:        pmap_procwr
2913  *
2914  *	Function:
2915  *		Synchronize caches corresponding to [va, va + len) in p.
2916  */
2917 void
2918 pmap_procwr(p, va, len)
2919 	struct proc	*p;
2920 	vaddr_t		va;
2921 	size_t		len;
2922 {
2923 	(void)cachectl1(0x80000004, va, len, p);
2924 }
2925 
2926 #ifdef mvme68k
2927 
2928 void
2929 _pmap_set_page_cacheable(pm, va)
2930 	struct pmap *pm;
2931 	vaddr_t va;
2932 {
2933 
2934 	if (!pmap_ste_v(pm, va))
2935 		return;
2936 
2937 #if defined(M68040) || defined(M68060)
2938 #if defined(M68020) || defined(M68030)
2939 	if (mmutype == MMU_68040)
2940 	{
2941 #endif
2942 	if (pmap_changebit(pmap_pte_pa(pmap_pte(pm, va)), PG_CCB, ~PG_CI))
2943 		DCIS();
2944 
2945 #if defined(M68020) || defined(M68030)
2946 	} else
2947 		pmap_changebit(pmap_pte_pa(pmap_pte(pm, va)), 0, ~PG_CI);
2948 #endif
2949 #else
2950 	pmap_changebit(pmap_pte_pa(pmap_pte(pm, va)), 0, ~PG_CI);
2951 #endif
2952 }
2953 
2954 void
2955 _pmap_set_page_cacheinhibit(pm, va)
2956 	struct pmap *pm;
2957 	vaddr_t va;
2958 {
2959 
2960 	if (!pmap_ste_v(pm, va))
2961 		return;
2962 
2963 #if defined(M68040) || defined(M68060)
2964 #if defined(M68020) || defined(M68030)
2965 	if (mmutype == MMU_68040)
2966 	{
2967 #endif
2968 	if (pmap_changebit(pmap_pte_pa(pmap_pte(pm, va)), PG_CI, ~PG_CCB))
2969 		DCIS();
2970 #if defined(M68020) || defined(M68030)
2971 	} else
2972 		pmap_changebit(pmap_pte_pa(pmap_pte(pm, va)), PG_CI, ~0);
2973 #endif
2974 #else
2975 	pmap_changebit(pmap_pte_pa(pmap_pte(pm, va)), PG_CI, ~0);
2976 #endif
2977 }
2978 
2979 int
2980 _pmap_page_is_cacheable(pm, va)
2981 	struct pmap *pm;
2982 	vaddr_t va;
2983 {
2984 
2985 	if (!pmap_ste_v(pm, va))
2986 		return (0);
2987 
2988 	return ((pmap_pte_ci(pmap_pte(pm, va)) == 0) ? 1 : 0);
2989 }
2990 
2991 #endif /* mvme68k */
2992 
2993 #ifdef DEBUG
2994 /*
2995  * pmap_pvdump:
2996  *
2997  *	Dump the contents of the PV list for the specified physical page.
2998  */
2999 void
3000 pmap_pvdump(pa)
3001 	paddr_t pa;
3002 {
3003 	struct pv_entry *pv;
3004 
3005 	printf("pa %lx", pa);
3006 	for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next)
3007 		printf(" -> pmap %p, va %lx, ptste %p, ptpmap %p, flags %x",
3008 		       pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap,
3009 		       pv->pv_flags);
3010 	printf("\n");
3011 }
3012 
3013 /*
3014  * pmap_check_wiring:
3015  *
3016  *	Count the number of valid mappings in the specified PT page,
3017  *	and ensure that it is consistent with the number of wirings
3018  *	to that page that the VM system has.
3019  */
3020 void
3021 pmap_check_wiring(str, va)
3022 	char *str;
3023 	vaddr_t va;
3024 {
3025 	pt_entry_t *pte;
3026 	paddr_t pa;
3027 	struct vm_page *pg;
3028 	int count;
3029 
3030 	if (!pmap_ste_v(pmap_kernel(), va) ||
3031 	    !pmap_pte_v(pmap_pte(pmap_kernel(), va)))
3032 		return;
3033 
3034 	pa = pmap_pte_pa(pmap_pte(pmap_kernel(), va));
3035 	pg = PHYS_TO_VM_PAGE(pa);
3036 	if (pg->wire_count >= PAGE_SIZE / sizeof(pt_entry_t)) {
3037 		panic("*%s*: 0x%lx: wire count %d", str, va, pg->wire_count);
3038 		return;
3039 	}
3040 
3041 	count = 0;
3042 	for (pte = (pt_entry_t *)va; pte < (pt_entry_t *)(va + NBPG); pte++)
3043 		if (*pte)
3044 			count++;
3045 	if (pg->wire_count != count)
3046 		panic("*%s*: 0x%lx: w%d/a%d",
3047 		       str, va, pg->wire_count, count);
3048 }
3049 #endif /* DEBUG */
3050