1 /*	$NetBSD: pmap.c,v 1.18 2002/09/27 15:36:53 provos Exp $	*/
2 
3 /*-
4  * Copyright (c) 1996 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Adam Glass, Gordon W. Ross, and Matthew Fredette.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *        This product includes software developed by the NetBSD
21  *        Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * Some notes:
41  *
42  * sun2s have eight contexts.  In this pmap design, the kernel is mapped
43  * into context zero.  Processes take up a known portion of the context,
44  * and compete for the available contexts on an LRU basis.
45  *
46  * sun2s also have this evil "PMEG" crapola.  Essentially each "context"'s
47  * address space is defined by the 512 one-byte entries in the segment map.
48  * Each of these 1-byte entries points to a "Page Map Entry Group" (PMEG)
49  * which contains the mappings for that virtual segment.  (This strange
50  * terminology was invented by Sun and is preserved here for consistency.)
51  * Each PMEG maps a segment of 32KB length, with 16 pages of 2KB each.
52  *
53  * As you might guess, these PMEGs are in short supply and heavy demand.
54  * PMEGs allocated to the kernel are "static" in the sense that they can't
55  * be stolen from it.  PMEGs allocated to a particular segment of a
56  * pmap's virtual space will be fought over by the other pmaps.
57  *
58  * This pmap was once sys/arch/sun3/sun3/pmap.c revision 1.135.
59  */
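/*
 * For example, with the 2KB pages and 32KB segments described above,
 * virtual address 0x12345 decomposes into segment number 2
 * (0x12345 >> 15), page 4 within that segment ((0x12345 >> 11) & 0xf),
 * and byte offset 0x345 within the page (see VA_SEGNUM and VA_PTE_NUM).
 * The current context number plus the segment number select one segmap
 * entry, which names the PMEG whose 16 PTEs map that 32KB segment.
 */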
60 
61 /*
62  * Cache management:
63  * sun2s don't have cache implementations, but for now the caching
64  * code remains in.  It's harmless (and, due to our 0 definitions of
65  * PG_NC and BADALIAS, should optimize away), and keeping it in makes
66  * it easier to diff this file against its cousin, sys/arch/sun3/sun3/pmap.c.
67  */
68 
69 /*
70  * wanted attributes:
71  *       pmegs that aren't needed by a pmap remain in the MMU.
72  *       quick context switches between pmaps
73  */
74 
75 /*
76  * Project1:  Use a "null" context for processes that have not
77  * touched any user-space address recently.  This is efficient
78  * for things that stay in the kernel for a while, waking up
79  * to handle some I/O then going back to sleep (e.g. nfsd).
80  * If and when such a process returns to user-mode, it will
81  * fault and be given a real context at that time.
82  *
83  * This also makes context switching fast, because all we need
84  * to do there for the MMU is slam the context register.
85  *
86  * Project2:  Use a private pool of PV elements.  This pool can be
87  * fixed size because the total mapped virtual space supported by
88  * the MMU H/W (and this pmap) is fixed for all time.
89  */
90 
91 #include "opt_ddb.h"
92 
93 #include <sys/param.h>
94 #include <sys/systm.h>
95 #include <sys/proc.h>
96 #include <sys/malloc.h>
97 #include <sys/pool.h>
98 #include <sys/user.h>
99 #include <sys/queue.h>
100 #include <sys/kcore.h>
101 
102 #include <uvm/uvm.h>
103 
104 #include <machine/cpu.h>
105 #include <machine/dvma.h>
106 #include <machine/idprom.h>
107 #include <machine/kcore.h>
108 #include <machine/promlib.h>
109 #include <machine/pmap.h>
110 #include <machine/pte.h>
111 #include <machine/vmparam.h>
112 
113 #include <sun2/sun2/control.h>
114 #include <sun2/sun2/fc.h>
115 #include <sun2/sun2/machdep.h>
116 
117 #ifdef DDB
118 #include <ddb/db_output.h>
119 #else
120 #define db_printf printf
121 #endif
122 
123 /* Verify this correspondence between definitions. */
124 #if	(PMAP_OBIO << PG_MOD_SHIFT) != PGT_OBIO
125 #error	"PMAP_XXX definitions don't match pte.h!"
126 #endif
127 
128 /* Type bits in a "pseudo" physical address. (XXX: pmap.h?) */
129 #define PMAP_TYPE	PMAP_MBIO
130 
131 /*
132  * Local convenience macros
133  */
134 
135 #define DVMA_MAP_END	(DVMA_MAP_BASE + DVMA_MAP_AVAIL)
136 
137 /* User segments are all of them. */
138 #define	NUSEG	(NSEGMAP)
139 
140 #define VA_SEGNUM(x)	((u_int)(x) >> SEGSHIFT)
141 
142 /*
143  * Only "main memory" pages are registered in the pv_lists.
144  * This macro is used to determine if a given pte refers to
145  * "main memory" or not.  One slight hack here deserves more
146  * explanation:  On the Sun-2, the bwtwo and zs1 appear
147  * as PG_OBMEM devices at 0x00700000 and 0x00780000,
148  * respectively.  We do not want to consider these as
149  * "main memory" so the macro below treats obmem addresses
150  * >= 0x00700000 as device addresses.  NB: this means for now,
151  * you can't have a headless Sun-2 with 8MB of main memory.
152  */
153 #define	IS_MAIN_MEM(pte) (((pte) & PG_TYPE) == 0 && PG_PA(pte) < 0x00700000)
154 
155 /* Does this (pseudo) PA represent device space? */
156 #define PA_IS_DEV(pa) (((pa) & PMAP_TYPE) != 0 || (pa) >= 0x00700000)
157 
158 /*
159  * Is there a Virtually Addressed Cache (VAC) alias problem
160  * if one page is mapped at both a1 and a2?
161  */
162 #define	BADALIAS(a1, a2)	(0)
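/*
 * Never, on the cache-less Sun-2: BADALIAS is constant zero here, so the
 * VAC handling kept below (for easy diffing against the sun3 pmap) should
 * compile away entirely.
 */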
163 
164 
165 /*
166  * Debugging support.
167  */
168 #define	PMD_ENTER	1
169 #define	PMD_LINK	2
170 #define	PMD_PROTECT	4
171 #define	PMD_SWITCH	8
172 #define PMD_COW		0x10
173 #define PMD_MODBIT	0x20
174 #define PMD_REFBIT	0x40
175 #define PMD_WIRING	0x80
176 #define PMD_CONTEXT	0x100
177 #define PMD_CREATE	0x200
178 #define PMD_SEGMAP	0x400
179 #define PMD_SETPTE	0x800
180 #define PMD_FAULT	0x1000
181 #define PMD_KMAP	0x2000
182 
183 #define	PMD_REMOVE	PMD_ENTER
184 #define	PMD_UNLINK	PMD_LINK
185 
186 #ifdef	PMAP_DEBUG
187 int pmap_debug = 0;
188 int pmap_db_watchva = -1;
189 int pmap_db_watchpmeg = -1;
190 #endif	/* PMAP_DEBUG */
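/*
 * In a PMAP_DEBUG kernel, set pmap_debug (e.g. from DDB) to a mask of the
 * PMD_* bits above to enable the corresponding trace output; pmap_db_watchva
 * and pmap_db_watchpmeg select a particular VA or PMEG to watch.
 */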
191 
192 /*
193  * Miscellaneous variables.
194  *
195  * For simplicity, this interface retains the variables
196  * that were used in the old interface (without NONCONTIG).
197  * These are set in pmap_bootstrap() and used in
198  * pmap_next_page().
199  */
200 vaddr_t virtual_avail, virtual_end;
201 paddr_t avail_start, avail_end;
202 #define	managed(pa)	(((pa) >= avail_start) && ((pa) < avail_end))
203 
204 /* used to skip a single hole in RAM */
205 static vaddr_t hole_start, hole_size;
206 
207 /* This is for pmap_next_page() */
208 static paddr_t avail_next;
209 
210 /* This is where we map a PMEG without a context. */
211 static vaddr_t temp_seg_va;
212 #ifdef DIAGNOSTIC
213 static int temp_seg_inuse;
214 #endif
215 
216 /*
217  * Location to store virtual addresses
218  * to be used in copy/zero operations.
219  */
220 vaddr_t tmp_vpages[2] = {
221 	NBPG * 8,
222 	NBPG * 9 };
223 int tmp_vpages_inuse;
224 
225 static int pmap_version = 1;
226 struct pmap kernel_pmap_store;
227 #define kernel_pmap (&kernel_pmap_store)
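/* S/W copy of the kernel segment map; see pmap_kernel_init(). */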
228 static u_char kernel_segmap[NSEGMAP];
229 
230 /* memory pool for pmap structures */
231 struct pool	pmap_pmap_pool;
232 
233 /* statistics... */
234 struct pmap_stats {
235 	int	ps_enter_firstpv;	/* pv heads entered */
236 	int	ps_enter_secondpv;	/* pv nonheads entered */
237 	int	ps_unlink_pvfirst;	/* # of pv_unlinks on head */
238 	int	ps_unlink_pvsearch;	/* # of pv_unlink searches */
239 	int	ps_pmeg_faultin;	/* pmegs reloaded */
240 	int	ps_changeprots;		/* # of calls to changeprot */
241 	int	ps_changewire;		/* useless wiring changes */
242 	int	ps_npg_prot_all;	/* # of active pages protected */
243 	int	ps_npg_prot_actual;	/* pages actually affected */
244 	int	ps_vac_uncached;	/* non-cached due to bad alias */
245 	int	ps_vac_recached;	/* re-cached when bad alias gone */
246 } pmap_stats;
247 
248 #define pmap_lock(pmap) simple_lock(&pmap->pm_lock)
249 #define pmap_unlock(pmap) simple_unlock(&pmap->pm_lock)
250 #define pmap_add_ref(pmap) ++pmap->pm_refcount
251 #define pmap_del_ref(pmap) --pmap->pm_refcount
252 #define pmap_refcount(pmap) pmap->pm_refcount
253 
254 #ifdef	PMAP_DEBUG
255 #define	CHECK_SPL() do { \
256 	if ((getsr() & PSL_IPL) < PSL_IPL4) \
257 		panic("pmap: bad spl, line %d", __LINE__); \
258 } while (0)
259 #else	/* PMAP_DEBUG */
260 #define	CHECK_SPL() (void)0
261 #endif	/* PMAP_DEBUG */
262 
263 
264 /*
265  * PV support.
266  * (i.e. Find all virtual mappings of a physical page.)
267  */
268 
269 int pv_initialized = 0;
270 
271 /* One of these for each mapped virtual page. */
272 struct pv_entry {
273 	struct pv_entry *pv_next;
274 	pmap_t	       pv_pmap;
275 	vaddr_t        pv_va;
276 };
277 typedef struct pv_entry *pv_entry_t;
278 
279 /* Table of PV list heads (per physical page). */
280 static struct pv_entry **pv_head_tbl;
281 
282 /* Free list of PV entries. */
283 static struct pv_entry *pv_free_list;
284 
285 /* Table of flags (per physical page). */
286 static u_char *pv_flags_tbl;
287 
288 /* These are as in the MMU but shifted by PV_SHIFT. */
289 #define PV_SHIFT	20
290 #define PV_VALID  (PG_VALID >> PV_SHIFT)
291 #define PV_NC     (PG_NC >> PV_SHIFT)
292 #define PV_TYPE   (PG_TYPE >> PV_SHIFT)
293 #define PV_REF    (PG_REF >> PV_SHIFT)
294 #define PV_MOD    (PG_MOD >> PV_SHIFT)
295 
296 
297 /*
298  * context structures, and queues
299  */
300 
301 struct context_state {
302 	TAILQ_ENTRY(context_state) context_link;
303 	int            context_num;
304 	struct pmap   *context_upmap;
305 };
306 typedef struct context_state *context_t;
307 
308 #define INVALID_CONTEXT -1	/* impossible value */
309 #define EMPTY_CONTEXT 0
310 #define KERNEL_CONTEXT 0
311 #define FIRST_CONTEXT 1
312 #define	has_context(pmap)	(((pmap)->pm_ctxnum != EMPTY_CONTEXT) == ((pmap) != kernel_pmap))
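/*
 * Note that KERNEL_CONTEXT == EMPTY_CONTEXT (0) here, so has_context() is
 * always true for the kernel pmap (whose pm_ctxnum stays 0), and is true
 * for a user pmap only while it actually holds one of the other contexts.
 */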
313 
314 TAILQ_HEAD(context_tailq, context_state)
315 	context_free_queue, context_active_queue;
316 
317 static struct context_state context_array[NCONTEXT];
318 
319 
320 /*
321  * PMEG structures, queues, and macros
322  */
323 #define PMEGQ_FREE     0
324 #define PMEGQ_INACTIVE 1
325 #define PMEGQ_ACTIVE   2
326 #define PMEGQ_KERNEL   3
327 #define PMEGQ_NONE     4
328 
329 struct pmeg_state {
330 	TAILQ_ENTRY(pmeg_state) pmeg_link;
331 	int            pmeg_index;
332 	pmap_t         pmeg_owner;
333 	int            pmeg_version;
334 	vaddr_t        pmeg_va;
335 	int            pmeg_wired;
336 	int            pmeg_reserved;
337 	int            pmeg_vpages;
338 	int            pmeg_qstate;
339 };
340 
341 typedef struct pmeg_state *pmeg_t;
342 
343 #define PMEG_INVAL (NPMEG-1)
344 #define PMEG_NULL (pmeg_t) NULL
345 
346 /* XXX - Replace pmeg_kernel_queue with pmeg_wired_queue ? */
347 TAILQ_HEAD(pmeg_tailq, pmeg_state)
348 	pmeg_free_queue, pmeg_inactive_queue,
349 	pmeg_active_queue, pmeg_kernel_queue;
350 
351 static struct pmeg_state pmeg_array[NPMEG];
352 
353 
354 /*
355  * prototypes
356  */
357 static int get_pte_pmeg __P((int, int));
358 static void set_pte_pmeg __P((int, int, int));
359 
360 static void context_allocate __P((pmap_t));
361 static void context_free __P((pmap_t));
362 static void context_init __P((void));
363 
364 static void pmeg_init __P((void));
365 static void pmeg_reserve __P((int));
366 
367 static pmeg_t pmeg_allocate __P((pmap_t, vaddr_t));
368 static void pmeg_mon_init __P((vaddr_t, vaddr_t, int));
369 static void pmeg_release __P((pmeg_t));
370 static void pmeg_free __P((pmeg_t));
371 static pmeg_t pmeg_cache __P((pmap_t, vaddr_t));
372 static void pmeg_set_wiring __P((pmeg_t, vaddr_t, int));
373 
374 static int  pv_link   __P((pmap_t, int, vaddr_t));
375 static void pv_unlink __P((pmap_t, int, vaddr_t));
376 static void pv_remove_all __P((paddr_t));
377 static void pv_changepte __P((paddr_t, int, int));
378 static u_int pv_syncflags __P((pv_entry_t));
379 static void pv_init __P((void));
380 
381 static void pmeg_clean __P((pmeg_t));
382 static void pmeg_clean_free __P((void));
383 
384 static void pmap_common_init __P((pmap_t));
385 static void pmap_kernel_init __P((pmap_t));
386 static void pmap_user_init __P((pmap_t));
387 static void pmap_page_upload __P((void));
388 
389 static void pmap_enter_kernel __P((vaddr_t, int, boolean_t));
390 static void pmap_enter_user __P((pmap_t, vaddr_t, int, boolean_t));
391 
392 static void pmap_protect1 __P((pmap_t, vaddr_t, vaddr_t));
393 static void pmap_protect_mmu __P((pmap_t, vaddr_t, vaddr_t));
394 static void pmap_protect_noctx __P((pmap_t, vaddr_t, vaddr_t));
395 
396 static void pmap_remove1 __P((pmap_t, vaddr_t, vaddr_t));
397 static void pmap_remove_mmu __P((pmap_t, vaddr_t, vaddr_t));
398 static void pmap_remove_noctx __P((pmap_t, vaddr_t, vaddr_t));
399 
400 static int  pmap_fault_reload __P((struct pmap *, vaddr_t, int));
401 
402 /* Called only from locore.s and pmap.c */
403 void	_pmap_switch __P((pmap_t));
404 
405 #ifdef	PMAP_DEBUG
406 void pmap_print __P((pmap_t));
407 void pv_print __P((struct vm_page *));
408 void pmeg_print __P((pmeg_t));
409 static void pmeg_verify_empty __P((vaddr_t));
410 #endif	/* PMAP_DEBUG */
411 void pmap_pinit __P((pmap_t));
412 void pmap_release __P((pmap_t));
413 
414 /*
415  * Various in-line helper functions.
416  */
417 
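/* Return the pmap of the current process, or kernel_pmap if there is none. */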
418 static inline pmap_t
419 current_pmap __P((void))
420 {
421 	struct proc *p;
422 	struct vmspace *vm;
423 	struct vm_map *map;
424 	pmap_t	pmap;
425 
426 	p = curproc;	/* XXX */
427 	if (p == NULL)
428 		pmap = kernel_pmap;
429 	else {
430 		vm = p->p_vmspace;
431 		map = &vm->vm_map;
432 		pmap = vm_map_pmap(map);
433 	}
434 
435 	return (pmap);
436 }
437 
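/* Find the head of the PV list for a managed (main memory) physical page. */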
438 static inline struct pv_entry **
439 pa_to_pvhead(paddr_t pa)
440 {
441 	int idx;
442 
443 	idx = PA_PGNUM(pa);
444 #ifdef	DIAGNOSTIC
445 	if (PA_IS_DEV(pa) || (idx >= physmem))
446 		panic("pmap:pa_to_pvhead: bad pa=0x%lx", pa);
447 #endif
448 	return (&pv_head_tbl[idx]);
449 }
450 
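/* Find the mod/ref/cache flag byte kept for a managed physical page. */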
451 static inline u_char *
452 pa_to_pvflags(paddr_t pa)
453 {
454 	int idx;
455 
456 	idx = PA_PGNUM(pa);
457 #ifdef	DIAGNOSTIC
458 	if (PA_IS_DEV(pa) || (idx >= physmem))
459 		panic("pmap:pa_to_pvflags: bad pa=0x%lx", pa);
460 #endif
461 	return (&pv_flags_tbl[idx]);
462 }
463 
464 /*
465  * Save the mod/ref bits from the given PTE, indexed by its PA
466  */
467 static inline void
468 save_modref_bits(int pte)
469 {
470 	u_char *pv_flags;
471 
472 	pv_flags = pa_to_pvflags(PG_PA(pte));
473 	*pv_flags |= ((pte & PG_MODREF) >> PV_SHIFT);
474 }
475 
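/* Convert a segment map entry (PMEG number) into its pmeg_state. */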
476 static inline pmeg_t
477 pmeg_p(int sme)
478 {
479 #ifdef	DIAGNOSTIC
480 	if (sme < 0 || sme >= SEGINV)
481 		panic("pmeg_p: bad sme");
482 #endif
483 	return &pmeg_array[sme];
484 }
485 
486 #define is_pmeg_wired(pmegp) (pmegp->pmeg_wired != 0)
487 
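/* Set or clear the wired bit for one page (at va) within the given PMEG. */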
488 static void
489 pmeg_set_wiring(pmegp, va, flag)
490 	pmeg_t pmegp;
491 	vaddr_t va;
492 	int flag;
493 {
494 	int idx, mask;
495 
496 	idx = VA_PTE_NUM(va);
497 	mask = 1 << idx;
498 
499 	if (flag)
500 		pmegp->pmeg_wired |= mask;
501 	else
502 		pmegp->pmeg_wired &= ~mask;
503 }
504 
505 /****************************************************************
506  * Context management functions.
507  */
508 
509 /* part of pmap_bootstrap */
510 static void
511 context_init()
512 {
513 	int i;
514 
515 	TAILQ_INIT(&context_free_queue);
516 	TAILQ_INIT(&context_active_queue);
517 
518 	/* Leave EMPTY_CONTEXT out of the free list. */
519 	context_array[0].context_upmap = kernel_pmap;
520 
521 	for (i = 1; i < NCONTEXT; i++) {
522 		context_array[i].context_num = i;
523 		context_array[i].context_upmap = NULL;
524 		TAILQ_INSERT_TAIL(&context_free_queue, &context_array[i],
525 						  context_link);
526 #ifdef	PMAP_DEBUG
527 		if (pmap_debug & PMD_CONTEXT)
528 			printf("context_init: sizeof(context_array[0])=%d\n",
529 				   sizeof(context_array[0]));
530 #endif
531 	}
532 }
533 
534 /* Get us a context (steal one if necessary). */
535 static void
536 context_allocate(pmap)
537 	pmap_t pmap;
538 {
539 	context_t context;
540 
541 	CHECK_SPL();
542 
543 #ifdef	DIAGNOSTIC
544 	if (pmap == kernel_pmap)
545 		panic("context_allocate: kernel_pmap");
546 	if (has_context(pmap))
547 		panic("pmap: pmap already has context allocated to it");
548 #endif
549 
550 	context = TAILQ_FIRST(&context_free_queue);
551 	if (context == NULL) {
552 		/* Steal the head of the active queue. */
553 		context = TAILQ_FIRST(&context_active_queue);
554 		if (context == NULL)
555 			panic("pmap: no contexts left?");
556 #ifdef	PMAP_DEBUG
557 		if (pmap_debug & PMD_CONTEXT)
558 			printf("context_allocate: steal ctx %d from pmap %p\n",
559 				   context->context_num, context->context_upmap);
560 #endif
561 		context_free(context->context_upmap);
562 		context = TAILQ_FIRST(&context_free_queue);
563 	}
564 	TAILQ_REMOVE(&context_free_queue, context, context_link);
565 
566 #ifdef DIAGNOSTIC
567 	if (context->context_upmap != NULL)
568 		panic("pmap: context in use???");
569 #endif
570 
571 	context->context_upmap = pmap;
572 	pmap->pm_ctxnum = context->context_num;
573 
574 	TAILQ_INSERT_TAIL(&context_active_queue, context, context_link);
575 
576 	/*
577 	 * We could reload the MMU here, but that would
578 	 * artificially move PMEGs from the inactive queue
579 	 * to the active queue, so do lazy reloading.
580 	 * XXX - Need to reload wired pmegs though...
581 	 * XXX: Verify that the context is empty?
582 	 */
583 }
584 
585 /*
586  * Unload the context and put it on the free queue.
587  */
588 static void
589 context_free(pmap)		/* :) */
590 	pmap_t pmap;
591 {
592 	int saved_ctxnum, ctxnum;
593 	int i, sme;
594 	context_t contextp;
595 	vaddr_t va;
596 
597 	CHECK_SPL();
598 
599 	ctxnum = pmap->pm_ctxnum;
600 	if (ctxnum < FIRST_CONTEXT || ctxnum >= NCONTEXT)
601 		panic("pmap: context_free ctxnum");
602 	contextp = &context_array[ctxnum];
603 
604 	/* Temporary context change. */
605 	saved_ctxnum = get_context();
606 	set_context(ctxnum);
607 
608 	/* Before unloading translations, flush cache. */
609 #ifdef	HAVECACHE
610 	if (cache_size)
611 		cache_flush_context();
612 #endif
613 
614 	/* Unload MMU (but keep in SW segmap). */
615 	for (i=0, va=0; i < NUSEG; i++, va+=NBSG) {
616 
617 #if !defined(PMAP_DEBUG)
618 		/* Short-cut using the S/W segmap (if !debug). */
619 		if (pmap->pm_segmap[i] == SEGINV)
620 			continue;
621 #endif
622 
623 		/* Check the H/W segmap. */
624 		sme = get_segmap(va);
625 		if (sme == SEGINV)
626 			continue;
627 
628 		/* Found valid PMEG in the segmap. */
629 #ifdef	PMAP_DEBUG
630 		if (pmap_debug & PMD_SEGMAP)
631 			printf("pmap: set_segmap ctx=%d v=0x%lx old=0x%x new=ff (cf)\n",
632 				   ctxnum, va, sme);
633 #endif
634 #ifdef	DIAGNOSTIC
635 		if (sme != pmap->pm_segmap[i])
636 			panic("context_free: unknown sme at va=0x%lx", va);
637 #endif
638 		/* Did cache flush above (whole context). */
639 		set_segmap(va, SEGINV);
640 		/* In this case, do not clear pm_segmap. */
641 		/* XXX: Maybe inline this call? */
642 		pmeg_release(pmeg_p(sme));
643 	}
644 
645 	/* Restore previous context. */
646 	set_context(saved_ctxnum);
647 
648 	/* Dequeue, update, requeue. */
649 	TAILQ_REMOVE(&context_active_queue, contextp, context_link);
650 	pmap->pm_ctxnum = EMPTY_CONTEXT;
651 	contextp->context_upmap = NULL;
652 	TAILQ_INSERT_TAIL(&context_free_queue, contextp, context_link);
653 }
654 
655 
656 /****************************************************************
657  * PMEG management functions.
658  */
659 
660 static void
661 pmeg_init()
662 {
663 	int x;
664 
665 	/* clear pmeg array, put it all on the free pmeg queue */
666 
667 	TAILQ_INIT(&pmeg_free_queue);
668 	TAILQ_INIT(&pmeg_inactive_queue);
669 	TAILQ_INIT(&pmeg_active_queue);
670 	TAILQ_INIT(&pmeg_kernel_queue);
671 
672 	memset(pmeg_array, 0, NPMEG*sizeof(struct pmeg_state));
673 	for (x = 0; x < NPMEG; x++) {
674 		TAILQ_INSERT_TAIL(&pmeg_free_queue, &pmeg_array[x],
675 				  pmeg_link);
676 		pmeg_array[x].pmeg_qstate = PMEGQ_FREE;
677 		pmeg_array[x].pmeg_index = x;
678 	}
679 
680 	/* The last pmeg is not usable. */
681 	pmeg_reserve(SEGINV);
682 }
683 
684 /*
685  * Reserve a pmeg (forever) for use by PROM, etc.
686  * Contents are left as-is.  Called very early...
687  */
688 void
689 pmeg_reserve(sme)
690 	int sme;
691 {
692 	pmeg_t pmegp;
693 
694 	/* Can not use pmeg_p() because it fails on SEGINV. */
695 	pmegp = &pmeg_array[sme];
696 
697 	if (pmegp->pmeg_reserved) {
698 		prom_printf("pmeg_reserve: already reserved\n");
699 		prom_abort();
700 	}
701 	if (pmegp->pmeg_owner) {
702 		prom_printf("pmeg_reserve: already owned\n");
703 		prom_abort();
704 	}
705 
706 	/* Owned by kernel, but not really usable... */
707 	pmegp->pmeg_owner = kernel_pmap;
708 	pmegp->pmeg_reserved++;	/* keep count, just in case */
709 	TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
710 	pmegp->pmeg_qstate = PMEGQ_NONE;
711 }
712 
713 /*
714  * Examine PMEGs used by the monitor, and either
715  * reserve them (keep=1) or clear them (keep=0)
716  */
717 static void
718 pmeg_mon_init(sva, eva, keep)
719 	vaddr_t sva, eva;
720 	int keep;	/* true: steal, false: clear */
721 {
722 	vaddr_t pgva, endseg;
723 	int pte, valid;
724 	unsigned char sme;
725 
726 #ifdef	PMAP_DEBUG
727 	if (pmap_debug & PMD_SEGMAP)
728 		prom_printf("pmeg_mon_init(0x%x, 0x%x, %d)\n",
729 		           sva, eva, keep);
730 #endif
731 
732 	sva &= ~(NBSG-1);
733 
734 	while (sva < eva) {
735 		sme = get_segmap(sva);
736 		if (sme != SEGINV) {
737 			valid = 0;
738 			endseg = sva + NBSG;
739 			for (pgva = sva; pgva < endseg; pgva += NBPG) {
740 				pte = get_pte(pgva);
741 				if (pte & PG_VALID) {
742 					valid++;
743 				}
744 			}
745 #ifdef	PMAP_DEBUG
746 			if (pmap_debug & PMD_SEGMAP)
747 				prom_printf(" sva=0x%x seg=0x%x valid=%d\n",
748 				           sva, sme, valid);
749 #endif
750 			if (keep && valid)
751 				pmeg_reserve(sme);
752 			else set_segmap(sva, SEGINV);
753 		}
754 		sva += NBSG;
755 	}
756 }
757 
758 /*
759  * This is used only during pmap_bootstrap, so we can
760  * get away with borrowing a slot in the segmap.
761  */
762 static void
763 pmeg_clean(pmegp)
764 	pmeg_t pmegp;
765 {
766 	int sme;
767 	vaddr_t va;
768 
769 	sme = get_segmap(temp_seg_va);
770 	if (sme != SEGINV)
771 		panic("pmeg_clean");
772 
773 	sme = pmegp->pmeg_index;
774 	set_segmap(temp_seg_va, sme);
775 
776 	for (va = 0; va < NBSG; va += NBPG)
777 		set_pte(temp_seg_va + va, PG_INVAL);
778 
779 	set_segmap(temp_seg_va, SEGINV);
780 }
781 
782 /*
783  * This routine makes sure that pmegs on the pmeg_free_queue contain
784  * no valid ptes.  It pulls things off the queue, cleans them, and
785  * puts them at the end.  The ending condition is finding the first
786  * queue element at the head of the queue again.
787  */
788 static void
789 pmeg_clean_free()
790 {
791 	pmeg_t pmegp, pmegp_first;
792 
793 	pmegp = TAILQ_FIRST(&pmeg_free_queue);
794 	if (pmegp == NULL)
795 		panic("pmap: no free pmegs available to clean");
796 
797 	pmegp_first = NULL;
798 
799 	for (;;) {
800 		pmegp = TAILQ_FIRST(&pmeg_free_queue);
801 		TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
802 
803 		pmegp->pmeg_qstate = PMEGQ_NONE;
804 		pmeg_clean(pmegp);
805 		pmegp->pmeg_qstate = PMEGQ_FREE;
806 
807 		TAILQ_INSERT_TAIL(&pmeg_free_queue, pmegp, pmeg_link);
808 
809 		if (pmegp == pmegp_first)
810 			break;
811 		if (pmegp_first == NULL)
812 			pmegp_first = pmegp;
813 	}
814 }
815 
816 /*
817  * Allocate a PMEG by whatever means necessary.
818  * (May invalidate some mappings!)
819  */
820 static pmeg_t
821 pmeg_allocate(pmap, va)
822 	pmap_t pmap;
823 	vaddr_t va;
824 {
825 	pmeg_t pmegp;
826 
827 	CHECK_SPL();
828 
829 #ifdef	DIAGNOSTIC
830 	if (va & SEGOFSET) {
831 		panic("pmap:pmeg_allocate: va=0x%lx", va);
832 	}
833 #endif
834 
835 	/* Get one onto the free list if necessary. */
836 	pmegp = TAILQ_FIRST(&pmeg_free_queue);
837 	if (!pmegp) {
838 		/* Try inactive queue... */
839 		pmegp = TAILQ_FIRST(&pmeg_inactive_queue);
840 		if (!pmegp) {
841 			/* Try active queue... */
842 			pmegp = TAILQ_FIRST(&pmeg_active_queue);
843 		}
844 		if (!pmegp) {
845 			panic("pmeg_allocate: failed");
846 		}
847 		/*
848 		 * Remove mappings to free-up a pmeg
849 		 * (so it will go onto the free list).
850 		 * XXX - Skip this one if it is wired?
851 		 */
852 		pmap_remove1(pmegp->pmeg_owner,
853 		             pmegp->pmeg_va,
854 		             pmegp->pmeg_va + NBSG);
855 	}
856 
857 	/* OK, free list has something for us to take. */
858 	pmegp = TAILQ_FIRST(&pmeg_free_queue);
859 #ifdef	DIAGNOSTIC
860 	if (pmegp == NULL)
861 		panic("pmeg_allocate: still none free?");
862 	if ((pmegp->pmeg_qstate != PMEGQ_FREE) ||
863 		(pmegp->pmeg_index == SEGINV) ||
864 		(pmegp->pmeg_vpages))
865 		panic("pmeg_allocate: bad pmegp=%p", pmegp);
866 #endif
867 #ifdef	PMAP_DEBUG
868 	if (pmegp->pmeg_index == pmap_db_watchpmeg) {
869 		db_printf("pmeg_allocate: watch pmegp=%p\n", pmegp);
870 		Debugger();
871 	}
872 #endif
873 
874 	TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
875 
876 	/* Reassign this PMEG for the caller. */
877 	pmegp->pmeg_owner = pmap;
878 	pmegp->pmeg_version = pmap->pm_version;
879 	pmegp->pmeg_va = va;
880 	pmegp->pmeg_wired = 0;
881 	pmegp->pmeg_reserved  = 0;
882 	pmegp->pmeg_vpages  = 0;
883 	if (pmap == kernel_pmap) {
884 		TAILQ_INSERT_TAIL(&pmeg_kernel_queue, pmegp, pmeg_link);
885 		pmegp->pmeg_qstate = PMEGQ_KERNEL;
886 	} else {
887 		TAILQ_INSERT_TAIL(&pmeg_active_queue, pmegp, pmeg_link);
888 		pmegp->pmeg_qstate = PMEGQ_ACTIVE;
889 	}
890 	/* Caller will verify that it's empty (if debugging). */
891 	return pmegp;
892 }
893 
894 /*
895  * Put pmeg on the inactive queue, leaving its contents intact.
896  * This happens when we lose our context.  We may reclaim
897  * this pmeg later if it is still in the inactive queue.
898  */
899 static void
900 pmeg_release(pmegp)
901 	pmeg_t pmegp;
902 {
903 
904 	CHECK_SPL();
905 
906 #ifdef	DIAGNOSTIC
907 	if ((pmegp->pmeg_owner == kernel_pmap) ||
908 		(pmegp->pmeg_qstate != PMEGQ_ACTIVE))
909 		panic("pmeg_release: bad pmeg=%p", pmegp);
910 #endif
911 
912 	TAILQ_REMOVE(&pmeg_active_queue, pmegp, pmeg_link);
913 	pmegp->pmeg_qstate = PMEGQ_INACTIVE;
914 	TAILQ_INSERT_TAIL(&pmeg_inactive_queue, pmegp, pmeg_link);
915 }
916 
917 /*
918  * Move the pmeg to the free queue from wherever it is.
919  * The pmeg will be clean.  It might be in kernel_pmap.
920  */
921 static void
922 pmeg_free(pmegp)
923 	pmeg_t pmegp;
924 {
925 
926 	CHECK_SPL();
927 
928 #ifdef	DIAGNOSTIC
929 	/* Caller should verify that it's empty. */
930 	if (pmegp->pmeg_vpages != 0)
931 		panic("pmeg_free: vpages");
932 #endif
933 
934 	switch (pmegp->pmeg_qstate) {
935 	case PMEGQ_ACTIVE:
936 		TAILQ_REMOVE(&pmeg_active_queue, pmegp, pmeg_link);
937 		break;
938 	case PMEGQ_INACTIVE:
939 		TAILQ_REMOVE(&pmeg_inactive_queue, pmegp, pmeg_link);
940 		break;
941 	case PMEGQ_KERNEL:
942 		TAILQ_REMOVE(&pmeg_kernel_queue, pmegp, pmeg_link);
943 		break;
944 	default:
945 		panic("pmeg_free: releasing bad pmeg");
946 		break;
947 	}
948 
949 #ifdef	PMAP_DEBUG
950 	if (pmegp->pmeg_index == pmap_db_watchpmeg) {
951 		db_printf("pmeg_free: watch pmeg 0x%x\n",
952 			   pmegp->pmeg_index);
953 		Debugger();
954 	}
955 #endif
956 
957 	pmegp->pmeg_owner = NULL;
958 	pmegp->pmeg_qstate = PMEGQ_FREE;
959 	TAILQ_INSERT_TAIL(&pmeg_free_queue, pmegp, pmeg_link);
960 }
961 
962 /*
963  * Find a PMEG that was put on the inactive queue when we
964  * had our context stolen.  If found, move to active queue.
965  */
966 static pmeg_t
967 pmeg_cache(pmap, va)
968 	pmap_t pmap;
969 	vaddr_t va;
970 {
971 	int sme, segnum;
972 	pmeg_t pmegp;
973 
974 	CHECK_SPL();
975 
976 #ifdef	DIAGNOSTIC
977 	if (pmap == kernel_pmap)
978 		panic("pmeg_cache: kernel_pmap");
979 	if (va & SEGOFSET) {
980 		panic("pmap:pmeg_cache: va=0x%lx", va);
981 	}
982 #endif
983 
984 	if (pmap->pm_segmap == NULL)
985 		return PMEG_NULL;
986 
987 	segnum = VA_SEGNUM(va);
988 	if (segnum > NUSEG)		/* out of range */
989 		return PMEG_NULL;
990 
991 	sme = pmap->pm_segmap[segnum];
992 	if (sme == SEGINV)	/* nothing cached */
993 		return PMEG_NULL;
994 
995 	pmegp = pmeg_p(sme);
996 
997 #ifdef	PMAP_DEBUG
998 	if (pmegp->pmeg_index == pmap_db_watchpmeg) {
999 		db_printf("pmeg_cache: watch pmeg 0x%x\n", pmegp->pmeg_index);
1000 		Debugger();
1001 	}
1002 #endif
1003 
1004 	/*
1005 	 * Our segmap named a PMEG.  If it is no longer ours,
1006 	 * invalidate that entry in our segmap and return NULL.
1007 	 */
1008 	if ((pmegp->pmeg_owner != pmap) ||
1009 		(pmegp->pmeg_version != pmap->pm_version) ||
1010 		(pmegp->pmeg_va != va))
1011 	{
1012 #ifdef	PMAP_DEBUG
1013 		db_printf("pmap:pmeg_cache: invalid pmeg: sme=0x%x\n", sme);
1014 		pmeg_print(pmegp);
1015 		Debugger();
1016 #endif
1017 		pmap->pm_segmap[segnum] = SEGINV;
1018 		return PMEG_NULL; /* cache lookup failed */
1019 	}
1020 
1021 #ifdef	DIAGNOSTIC
1022 	/* Make sure it is on the inactive queue. */
1023 	if (pmegp->pmeg_qstate != PMEGQ_INACTIVE)
1024 		panic("pmeg_cache: pmeg was taken: %p", pmegp);
1025 #endif
1026 
1027 	TAILQ_REMOVE(&pmeg_inactive_queue, pmegp, pmeg_link);
1028 	pmegp->pmeg_qstate = PMEGQ_ACTIVE;
1029 	TAILQ_INSERT_TAIL(&pmeg_active_queue, pmegp, pmeg_link);
1030 
1031 	return pmegp;
1032 }
1033 
1034 #ifdef	PMAP_DEBUG
1035 static void
1036 pmeg_verify_empty(va)
1037 	vaddr_t va;
1038 {
1039 	vaddr_t eva;
1040 	int pte;
1041 
1042 	for (eva = va + NBSG;  va < eva; va += NBPG) {
1043 		pte = get_pte(va);
1044 		if (pte & PG_VALID)
1045 			panic("pmeg_verify_empty");
1046 	}
1047 }
1048 #endif	/* PMAP_DEBUG */
1049 
1050 
1051 /****************************************************************
1052  * Physical-to-virtual lookup support
1053  *
1054  * Need memory for the pv_alloc/pv_free list heads
1055  * and elements.  We know how many to allocate since
1056  * there is one list head for each physical page, and
1057  * at most one element for each PMEG slot.
1058  */
1059 static void
1060 pv_init()
1061 {
1062 	int npp, nvp, sz;
1063 	pv_entry_t pv;
1064 	char *p;
1065 
1066 	/* total allocation size */
1067 	sz = 0;
1068 
1069 	/*
1070 	 * Data for each physical page.
1071 	 * Each "mod/ref" flag is a char.
1072 	 * Each PV head is a pointer.
1073 	 * Note physmem is in pages.
1074 	 */
1075 	npp = ALIGN(physmem);
1076 	sz += (npp * sizeof(*pv_flags_tbl));
1077 	sz += (npp * sizeof(*pv_head_tbl));
1078 
1079 	/*
1080 	 * Data for each virtual page (all PMEGs).
1081 	 * One pv_entry for each page frame.
1082 	 */
1083 	nvp = NPMEG * NPAGSEG;
1084 	sz += (nvp * sizeof(*pv_free_list));
1085 
1086 	/* Now allocate the whole thing. */
1087 	sz = m68k_round_page(sz);
1088 	p = (char *)uvm_km_alloc(kernel_map, sz);
1089 	if (p == NULL)
1090 		panic("pmap:pv_init: alloc failed");
1091 	memset(p, 0, sz);
1092 
1093 	/* Now divide up the space. */
1094 	pv_flags_tbl = (void *) p;
1095 	p += (npp * sizeof(*pv_flags_tbl));
1096 	pv_head_tbl = (void*) p;
1097 	p += (npp * sizeof(*pv_head_tbl));
1098 	pv_free_list = (void *)p;
1099 	p += (nvp * sizeof(*pv_free_list));
1100 
1101 	/* Finally, make pv_free_list into a list. */
1102 	for (pv = pv_free_list; (char *)pv < p; pv++)
1103 		pv->pv_next = &pv[1];
1104 	pv[-1].pv_next = 0;
1105 
1106 	pv_initialized++;
1107 }
1108 
1109 /*
1110  * Set or clear bits in all PTEs mapping a page.
1111  * Also does syncflags work while we are there...
1112  */
1113 static void
1114 pv_changepte(pa, set_bits, clear_bits)
1115 	paddr_t pa;
1116 	int set_bits;
1117 	int clear_bits;
1118 {
1119 	pv_entry_t *head, pv;
1120 	u_char *pv_flags;
1121 	pmap_t pmap;
1122 	vaddr_t va;
1123 	int pte, sme;
1124 	int saved_ctx;
1125 	boolean_t in_ctx;
1126 	u_int flags;
1127 
1128 	pv_flags = pa_to_pvflags(pa);
1129 	head     = pa_to_pvhead(pa);
1130 
1131 	/* If no mappings, no work to do. */
1132 	if (*head == NULL)
1133 		return;
1134 
1135 #ifdef	DIAGNOSTIC
1136 	/* This function should only clear these bits: */
1137 	if (clear_bits & ~(PG_WRITE | PG_NC | PG_REF | PG_MOD))
1138 		panic("pv_changepte: clear=0x%x", clear_bits);
1139 #endif
1140 
1141 	flags = 0;
1142 	saved_ctx = get_context();
1143 	for (pv = *head; pv != NULL; pv = pv->pv_next) {
1144 		pmap = pv->pv_pmap;
1145 		va = pv->pv_va;
1146 
1147 #ifdef	DIAGNOSTIC
1148 		if (pmap->pm_segmap == NULL)
1149 			panic("pv_changepte: null segmap");
1150 #endif
1151 
1152 		/* Is the PTE currently accessible in some context? */
1153 		in_ctx = FALSE;
1154 		sme = SEGINV;	/* kill warning */
1155 		if (pmap == kernel_pmap) {
1156 			set_context(KERNEL_CONTEXT);
1157 			in_ctx = TRUE;
1158 		}
1159 		else if (has_context(pmap)) {
1160 			/* PMEG may be inactive. */
1161 			set_context(pmap->pm_ctxnum);
1162 			sme = get_segmap(va);
1163 			if (sme != SEGINV)
1164 				in_ctx = TRUE;
1165 		}
1166 
1167 		if (in_ctx == TRUE) {
1168 			/*
1169 			 * The PTE is in the current context.
1170 			 * Make sure PTE is up-to-date with VAC.
1171 			 */
1172 #ifdef	HAVECACHE
1173 			if (cache_size)
1174 				cache_flush_page(va);
1175 #endif
1176 			pte = get_pte(va);
1177 		} else {
1178 
1179 			/*
1180 			 * The PTE is not in any context.
1181 			 */
1182 
1183 			sme = pmap->pm_segmap[VA_SEGNUM(va)];
1184 #ifdef	DIAGNOSTIC
1185 			if (sme == SEGINV)
1186 				panic("pv_changepte: SEGINV");
1187 #endif
1188 			pte = get_pte_pmeg(sme, VA_PTE_NUM(va));
1189 		}
1190 
1191 #ifdef	DIAGNOSTIC
1192 		/* PV entries point only to valid mappings. */
1193 		if ((pte & PG_VALID) == 0)
1194 			panic("pv_changepte: not PG_VALID at va=0x%lx", va);
1195 #endif
1196 		/* Get these while it's easy. */
1197 		if (pte & PG_MODREF) {
1198 			flags |= (pte & PG_MODREF);
1199 			pte &= ~PG_MODREF;
1200 		}
1201 
1202 		/* Finally, set and clear some bits. */
1203 		pte |= set_bits;
1204 		pte &= ~clear_bits;
1205 
1206 		if (in_ctx == TRUE) {
1207 			/* Did cache flush above. */
1208 			set_pte(va, pte);
1209 		} else {
1210 			set_pte_pmeg(sme, VA_PTE_NUM(va), pte);
1211 		}
1212 	}
1213 	set_context(saved_ctx);
1214 
1215 	*pv_flags |= (flags >> PV_SHIFT);
1216 }
1217 
1218 /*
1219  * Return the ref and mod bits from the pvlist,
1220  * and turn off the same bits in the hardware PTEs.
1221  */
1222 static u_int
1223 pv_syncflags(pv)
1224 	pv_entry_t pv;
1225 {
1226 	pmap_t pmap;
1227 	vaddr_t va;
1228 	int pte, sme;
1229 	int saved_ctx;
1230 	boolean_t in_ctx;
1231 	u_int flags;
1232 
1233 	/* If no mappings, no work to do. */
1234 	if (pv == NULL)
1235 		return (0);
1236 
1237 	flags = 0;
1238 	saved_ctx = get_context();
1239 	for (; pv != NULL; pv = pv->pv_next) {
1240 		pmap = pv->pv_pmap;
1241 		va = pv->pv_va;
1242 		sme = SEGINV;
1243 
1244 #ifdef	DIAGNOSTIC
1245 		/*
1246 		 * Only the head may have a null pmap, and
1247 		 * we checked for that above.
1248 		 */
1249 		if (pmap->pm_segmap == NULL)
1250 			panic("pv_syncflags: null segmap");
1251 #endif
1252 
1253 		/* Is the PTE currently accessible in some context? */
1254 		in_ctx = FALSE;
1255 		if (pmap == kernel_pmap) {
1256 			set_context(KERNEL_CONTEXT);
1257 			in_ctx = TRUE;
1258 		}
1259 		else if (has_context(pmap)) {
1260 			/* PMEG may be inactive. */
1261 			set_context(pmap->pm_ctxnum);
1262 			sme = get_segmap(va);
1263 			if (sme != SEGINV)
1264 				in_ctx = TRUE;
1265 		}
1266 
1267 		if (in_ctx == TRUE) {
1268 
1269 			/*
1270 			 * The PTE is in the current context.
1271 			 * Make sure PTE is up-to-date with VAC.
1272 			 */
1273 
1274 #ifdef	HAVECACHE
1275 			if (cache_size)
1276 				cache_flush_page(va);
1277 #endif
1278 			pte = get_pte(va);
1279 		} else {
1280 
1281 			/*
1282 			 * The PTE is not in any context.
1283 			 */
1284 
1285 			sme = pmap->pm_segmap[VA_SEGNUM(va)];
1286 #ifdef	DIAGNOSTIC
1287 			if (sme == SEGINV)
1288 				panic("pv_syncflags: SEGINV");
1289 #endif
1290 			pte = get_pte_pmeg(sme, VA_PTE_NUM(va));
1291 		}
1292 
1293 #ifdef	DIAGNOSTIC
1294 		/* PV entries point only to valid mappings. */
1295 		if ((pte & PG_VALID) == 0)
1296 			panic("pv_syncflags: not PG_VALID at va=0x%lx", va);
1297 #endif
1298 		/* OK, do what we came here for... */
1299 		if (pte & PG_MODREF) {
1300 			flags |= (pte & PG_MODREF);
1301 			pte &= ~PG_MODREF;
1302 		}
1303 
1304 		if (in_ctx == TRUE) {
1305 			/* Did cache flush above. */
1306 			set_pte(va, pte);
1307 		} else {
1308 			set_pte_pmeg(sme, VA_PTE_NUM(va), pte);
1309 		}
1310 	}
1311 	set_context(saved_ctx);
1312 
1313 	return (flags >> PV_SHIFT);
1314 }
1315 
1316 /* Remove all mappings for the physical page. */
1317 static void
1318 pv_remove_all(pa)
1319 	paddr_t pa;
1320 {
1321 	pv_entry_t *head, pv;
1322 	pmap_t pmap;
1323 	vaddr_t va;
1324 
1325 	CHECK_SPL();
1326 
1327 #ifdef PMAP_DEBUG
1328 	if (pmap_debug & PMD_REMOVE)
1329 		printf("pv_remove_all(0x%lx)\n", pa);
1330 #endif
1331 
1332 	head = pa_to_pvhead(pa);
1333 	while ((pv = *head) != NULL) {
1334 		pmap = pv->pv_pmap;
1335 		va   = pv->pv_va;
1336 		pmap_remove1(pmap, va, va + NBPG);
1337 #ifdef PMAP_DEBUG
1338 		/* Make sure it went away. */
1339 		if (pv == *head) {
1340 			db_printf("pv_remove_all: head unchanged for pa=0x%lx\n", pa);
1341 			Debugger();
1342 		}
1343 #endif
1344 	}
1345 }
1346 
1347 /*
1348  * The pmap system is asked to lookup all mappings that point to a
1349  * given physical memory address.  This function adds a new element
1350  * to the list of mappings maintained for the given physical address.
1351  * Returns PV_NC if the (new) pvlist says that the address cannot
1352  * be cached.
1353  */
1354 static int
1355 pv_link(pmap, pte, va)
1356 	pmap_t pmap;
1357 	int pte;
1358 	vaddr_t va;
1359 {
1360 	paddr_t pa;
1361 	pv_entry_t *head, pv;
1362 	u_char *pv_flags;
1363 	int flags;
1364 
1365 	if (!pv_initialized)
1366 		return 0;
1367 
1368 	CHECK_SPL();
1369 
1370 	/* Only the non-cached bit is of interest here. */
1371 	flags = (pte & PG_NC) ? PV_NC : 0;
1372 	pa = PG_PA(pte);
1373 
1374 #ifdef PMAP_DEBUG
1375 	if ((pmap_debug & PMD_LINK) || (va == pmap_db_watchva)) {
1376 		printf("pv_link(%p, 0x%x, 0x%lx)\n", pmap, pte, va);
1377 		/* pv_print(pa); */
1378 	}
1379 #endif
1380 
1381 	pv_flags = pa_to_pvflags(pa);
1382 	head     = pa_to_pvhead(pa);
1383 
1384 #ifdef	DIAGNOSTIC
1385 	/* See if this mapping is already in the list. */
1386 	for (pv = *head; pv != NULL; pv = pv->pv_next) {
1387 		if ((pv->pv_pmap == pmap) && (pv->pv_va == va))
1388 			panic("pv_link: duplicate entry for PA=0x%lx", pa);
1389 	}
1390 #endif
1391 #ifdef HAVECACHE
1392 
1393 	/*
1394 	 * Does this new mapping cause VAC alias problems?
1395 	 */
1396 
1397 	*pv_flags |= flags;
1398 	if ((*pv_flags & PV_NC) == 0) {
1399 		for (pv = *head; pv != NULL; pv = pv->pv_next) {
1400 			if (BADALIAS(va, pv->pv_va)) {
1401 				*pv_flags |= PV_NC;
1402 				pv_changepte(pa, PG_NC, 0);
1403 				pmap_stats.ps_vac_uncached++;
1404 				break;
1405 			}
1406 		}
1407 	}
1408 #endif
1409 
1410 	/* Allocate a PV element (pv_alloc()). */
1411 	pv = pv_free_list;
1412 	if (pv == NULL)
1413 		panic("pv_link: pv_alloc");
1414 	pv_free_list = pv->pv_next;
1415 	pv->pv_next = 0;
1416 
1417 	/* Insert new entry at the head. */
1418 	pv->pv_pmap = pmap;
1419 	pv->pv_va   = va;
1420 	pv->pv_next = *head;
1421 	*head = pv;
1422 
1423 	return (*pv_flags & PV_NC);
1424 }
1425 
1426 /*
1427  * pv_unlink is a helper function for pmap_remove.
1428  * It removes the appropriate (pmap, pa, va) entry.
1429  *
1430  * Once the entry is removed, if the pv_table head has the cache
1431  * inhibit bit set, see if we can turn that off; if so, walk the
1432  * pvlist and turn off PG_NC in each PTE.  (The pvlist is by
1433  * definition nonempty, since it must have at least two elements
1434  * in it to have PV_NC set, and we only remove one here.)
1435  */
1436 static void
1437 pv_unlink(pmap, pte, va)
1438 	pmap_t pmap;
1439 	int pte;
1440 	vaddr_t va;
1441 {
1442 	paddr_t pa;
1443 	pv_entry_t *head, *ppv, pv;
1444 	u_char *pv_flags;
1445 
1446 	CHECK_SPL();
1447 
1448 	pa = PG_PA(pte);
1449 #ifdef PMAP_DEBUG
1450 	if ((pmap_debug & PMD_LINK) || (va == pmap_db_watchva)) {
1451 		printf("pv_unlink(%p, 0x%x, 0x%lx)\n", pmap, pte, va);
1452 		/* pv_print(pa); */
1453 	}
1454 #endif
1455 
1456 	pv_flags = pa_to_pvflags(pa);
1457 	head     = pa_to_pvhead(pa);
1458 
1459 	/*
1460 	 * Find the entry.
1461 	 */
1462 	ppv = head;
1463 	pv = *ppv;
1464 	while (pv) {
1465 		if ((pv->pv_pmap == pmap) && (pv->pv_va == va))
1466 			goto found;
1467 		ppv = &pv->pv_next;
1468 		pv  =  pv->pv_next;
1469 	}
1470 #ifdef PMAP_DEBUG
1471 	db_printf("pv_unlink: not found (pa=0x%lx,va=0x%lx)\n", pa, va);
1472 	Debugger();
1473 #endif
1474 	return;
1475 
1476 found:
1477 	/* Unlink this entry from the list and clear it. */
1478 	*ppv = pv->pv_next;
1479 	pv->pv_pmap = NULL;
1480 	pv->pv_va   = 0;
1481 
1482 	/* Insert it on the head of the free list. (pv_free()) */
1483 	pv->pv_next = pv_free_list;
1484 	pv_free_list = pv;
1485 	pv = NULL;
1486 
1487 	/* Do any non-cached mappings remain? */
1488 	if ((*pv_flags & PV_NC) == 0)
1489 		return;
1490 	if ((pv = *head) == NULL)
1491 		return;
1492 
1493 	/*
1494 	 * Have non-cached mappings.  See if we can fix that now.
1495 	 */
1496 	va = pv->pv_va;
1497 	for (pv = pv->pv_next; pv != NULL; pv = pv->pv_next) {
1498 		/* If there is a DVMA mapping, leave it NC. */
1499 		if (va >= DVMA_MAP_BASE)
1500 			return;
1501 		/* If there are VAC alias problems, leave NC. */
1502 		if (BADALIAS(va, pv->pv_va))
1503 			return;
1504 	}
1505 	/* OK, there are no "problem" mappings. */
1506 	*pv_flags &= ~PV_NC;
1507 	pv_changepte(pa, 0, PG_NC);
1508 	pmap_stats.ps_vac_recached++;
1509 }
1510 
1511 
1512 /****************************************************************
1513  * Bootstrap and Initialization, etc.
1514  */
1515 
1516 void
1517 pmap_common_init(pmap)
1518 	pmap_t pmap;
1519 {
1520 	memset(pmap, 0, sizeof(struct pmap));
1521 	pmap->pm_refcount = 1;
1522 	pmap->pm_version = pmap_version++;
1523 	pmap->pm_ctxnum = EMPTY_CONTEXT;
1524 	simple_lock_init(&pmap->pm_lock);
1525 }
1526 
1527 /*
1528  * Prepare the kernel for VM operations.
1529  * This is called by locore2.c:_vm_init()
1530  * after the "start/end" globals are set.
1531  * This function must NOT leave context zero.
1532  */
1533 void
1534 pmap_bootstrap(nextva)
1535 	vaddr_t nextva;
1536 {
1537 	vaddr_t va, eva;
1538 	int i, pte, sme;
1539 	extern char etext[];
1540 
1541 	nextva = m68k_round_page(nextva);
1542 
1543 	/* Steal some special-purpose, already mapped pages? */
1544 
1545 	/*
1546 	 * Determine the range of kernel virtual space available.
1547 	 * It is segment-aligned to simplify PMEG management.
1548 	 */
1549 	virtual_avail = m68k_round_seg(nextva);
1550 	virtual_end = VM_MAX_KERNEL_ADDRESS;
1551 
1552 	/*
1553 	 * Determine the range of physical memory available.
1554 	 */
1555 	avail_start = nextva;
1556 	avail_end = prom_memsize();
1557 	avail_end = m68k_trunc_page(avail_end);
1558 
1559 	/*
1560 	 * Report the actual amount of physical memory,
1561 	 * even though the PROM takes a few pages.
1562 	 */
1563 	physmem = (btoc(avail_end) + 0xF) & ~0xF;
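	/* (i.e. the page count, rounded up to a multiple of 16 pages) */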
1564 
1565 	/*
1566 	 * Done allocating PAGES of virtual space, so
1567 	 * clean out the rest of the last used segment.
1568 	 */
1569 	for (va = nextva; va < virtual_avail; va += NBPG)
1570 		set_pte(va, PG_INVAL);
1571 
1572 	/*
1573 	 * Now that we are done stealing physical pages, etc.
1574 	 * figure out which PMEGs are used by those mappings
1575 	 * and either reserve them or clear them out.
1576 	 * -- but first, init PMEG management.
1577 	 * This puts all PMEGs in the free list.
1578 	 * We will allocate the in-use ones.
1579 	 */
1580 	pmeg_init();
1581 
1582 	/*
1583 	 * Reserve PMEGS for kernel text/data/bss
1584 	 * and the misc pages taken above.
1585 	 * VA range: [KERNBASE .. virtual_avail]
1586 	 */
1587 	for (va = KERNBASE; va < virtual_avail; va += NBSG) {
1588 		sme = get_segmap(va);
1589 		if (sme == SEGINV) {
1590 			prom_printf("kernel text/data/bss not mapped\n");
1591 			prom_abort();
1592 		}
1593 		pmeg_reserve(sme);
1594 	}
1595 
1596 	/*
1597 	 * Unmap kernel virtual space.  Make sure to leave no valid
1598 	 * segmap entries in the MMU unless pmeg_array records them.
1599 	 * VA range: [vseg_avail .. virtual_end]
1600 	 */
1601 	for ( ; va < virtual_end; va += NBSG)
1602 		set_segmap(va, SEGINV);
1603 
1604 	/*
1605 	 * Reserve PMEGs used by the PROM monitor (device mappings).
1606 	 * Free up any pmegs in this range which have no mappings.
1607 	 * VA range: [0x00E00000 .. 0x00F00000]
1608 	 */
1609 	pmeg_mon_init(SUN2_MONSTART, SUN2_MONEND, TRUE);
1610 
1611 	/*
1612 	 * Unmap any pmegs left in DVMA space by the PROM.
1613 	 * DO NOT kill the last one! (owned by the PROM!)
1614 	 * VA range: [0x00F00000 .. 0x00FE0000]
1615 	 */
1616 	pmeg_mon_init(SUN2_MONEND, SUN2_MONEND + DVMA_MAP_SIZE, FALSE);
1617 
1618 	/*
1619 	 * Done reserving PMEGs and/or clearing out mappings.
1620 	 *
1621 	 * Now verify the mapping protections and such for the
1622 	 * important parts of the address space (in VA order).
1623 	 * Note that the Sun PROM usually leaves the memory
1624 	 * mapped with everything non-cached...
1625 	 */
1626 
1627 	/*
1628 	 * On a Sun2, the boot loader loads the kernel exactly where
1629 	 * it is linked, at physical/virtual 0x6000 (KERNBASE).  This
1630 	 * means there are twelve physical/virtual pages before the
1631 	 * kernel text begins.
1632 	 */
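	/*
	 * (0x6000 bytes / 2KB pages = 12 pages: four for the PROM, four
	 * for the msgbuf, two for tmp_vpages, and two for the temporary
	 * kernel stack, as laid out below.)
	 */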
1633 	va = 0;
1634 
1635 	/*
1636 	 * Physical/virtual pages zero through three are used by the
1637 	 * PROM.  prom_init has already saved the PTEs, but we don't
1638 	 * want to unmap the pages until we've installed our own
1639 	 * vector table - just in case something happens before then
1640 	 * and we drop into the PROM.
1641 	 */
1642 	eva = va + NBPG * 4;
1643 	va = eva;
1644 
1645 	/*
1646 	 * We use pages four through seven for the msgbuf.
1647 	 */
1648 	eva = va + NBPG * 4;
1649 	for(; va < eva; va += NBPG) {
1650 		pte = get_pte(va);
1651 		pte |= (PG_SYSTEM | PG_WRITE | PG_NC);
1652 		set_pte(va, pte);
1653 	}
1654 	/* Initialize msgbufaddr later, in machdep.c */
1655 
1656 	/*
1657 	 * On the Sun3, two of the three dead pages in SUN3_MONSHORTSEG
1658 	 * are used for tmp_vpages.  The Sun2 doesn't have this
1659 	 * short-segment concept, so we reserve virtual pages eight
1660 	 * and nine for this.
1661 	 */
1662 	set_pte(va, PG_INVAL);
1663 	va += NBPG;
1664 	set_pte(va, PG_INVAL);
1665 	va += NBPG;
1666 
1667 	/*
1668 	 * Pages ten and eleven remain for the temporary kernel stack,
1669 	 * which is set up by locore.s.  Hopefully this is enough space.
1670 	 */
1671 	eva = va + NBPG * 2;
1672 	for(; va < eva ; va += NBPG) {
1673 		pte = get_pte(va);
1674 		pte &= ~(PG_NC);
1675 		pte |= (PG_SYSTEM | PG_WRITE);
1676 		set_pte(va, pte);
1677 	}
1678 
1679 	/*
1680 	 * Next is the kernel text.
1681 	 *
1682 	 * Verify protection bits on kernel text/data/bss
1683 	 * All of kernel text, data, and bss are cached.
1684 	 * Text is read-only (except in db_write_ktext).
1685 	 */
1686 	eva = m68k_trunc_page(etext);
1687 	while (va < eva) {
1688 		pte = get_pte(va);
1689 		if ((pte & (PG_VALID|PG_TYPE)) != PG_VALID) {
1690 			prom_printf("invalid page at 0x%x\n", va);
1691 		}
1692 		pte &= ~(PG_WRITE|PG_NC);
1693 		/* Kernel text is read-only */
1694 		pte |= (PG_SYSTEM);
1695 		set_pte(va, pte);
1696 		va += NBPG;
1697 	}
1698 	/* data, bss, etc. */
1699 	while (va < nextva) {
1700 		pte = get_pte(va);
1701 		if ((pte & (PG_VALID|PG_TYPE)) != PG_VALID) {
1702 			prom_printf("invalid page at 0x%x\n", va);
1703 		}
1704 		pte &= ~(PG_NC);
1705 		pte |= (PG_SYSTEM | PG_WRITE);
1706 		set_pte(va, pte);
1707 		va += NBPG;
1708 	}
1709 
1710 	/*
1711 	 * Initialize all of the other contexts.
1712 	 */
1713 #ifdef	DIAGNOSTIC
1714 	/* Near the beginning of locore.s we set context zero. */
1715 	if (get_context() != 0) {
1716 		prom_printf("pmap_bootstrap: not in context zero?\n");
1717 		prom_abort();
1718 	}
1719 #endif	/* DIAGNOSTIC */
1720 	for (va = 0; va < (vaddr_t) (NBSG * NSEGMAP); va += NBSG) {
1721 		for (i = 1; i < NCONTEXT; i++) {
1722 			set_context(i);
1723 			set_segmap(va, SEGINV);
1724 		}
1725 	}
1726 	set_context(KERNEL_CONTEXT);
1727 
1728 	/*
1729 	 * Reserve a segment for the kernel to use to access a pmeg
1730 	 * that is not currently mapped into any context/segmap.
1731 	 * The kernel temporarily maps such a pmeg into this segment.
1732 	 */
1733 	temp_seg_va = virtual_avail;
1734 	virtual_avail += NBSG;
1735 #ifdef	DIAGNOSTIC
1736 	if (temp_seg_va & SEGOFSET) {
1737 		prom_printf("pmap_bootstrap: temp_seg_va\n");
1738 		prom_abort();
1739 	}
1740 #endif
1741 
1742 	/* Initialization for pmap_next_page() */
1743 	avail_next = avail_start;
1744 
1745 	uvmexp.pagesize = NBPG;
1746 	uvm_setpagesize();
1747 
1748 	/* after setting up some structures */
1749 
1750 	pmap_common_init(kernel_pmap);
1751 	pmap_kernel_init(kernel_pmap);
1752 
1753 	context_init();
1754 
1755 	pmeg_clean_free();
1756 
1757 	pmap_page_upload();
1758 }
1759 
1760 /*
1761  * Give the kernel pmap a segmap, just so there are not
1762  * so many special cases required.  Maybe faster too,
1763  * because this lets pmap_remove() and pmap_protect()
1764  * use a S/W copy of the segmap to avoid function calls.
1765  */
1766 void
1767 pmap_kernel_init(pmap)
1768 	 pmap_t pmap;
1769 {
1770 	vaddr_t va;
1771 	int i, sme;
1772 
1773 	for (i=0, va=0; i < NSEGMAP; i++, va+=NBSG) {
1774 		sme = get_segmap(va);
1775 		kernel_segmap[i] = sme;
1776 	}
1777 	pmap->pm_segmap = kernel_segmap;
1778 }
1779 
1780 
1781 /****************************************************************
1782  * PMAP interface functions.
1783  */
1784 
1785 /*
1786  * Support functions for vm_page_bootstrap().
1787  */
1788 
1789 /*
1790  * How much virtual space does this kernel have?
1791  * (After mapping kernel text, data, etc.)
1792  */
1793 void
1794 pmap_virtual_space(v_start, v_end)
1795 	vaddr_t *v_start;
1796 	vaddr_t *v_end;
1797 {
1798 	*v_start = virtual_avail;
1799 	*v_end   = virtual_end;
1800 }
1801 
1802 /* Provide memory to the VM system. */
1803 static void
1804 pmap_page_upload()
1805 {
1806 	int a, b, c, d;
1807 
1808 	if (hole_size) {
1809 		/*
1810 		 * Supply the memory in two segments so the
1811 		 * reserved memory (3/50 video ram at 1MB)
1812 		 * can be carved from the front of the 2nd.
1813 		 */
1814 		a = atop(avail_start);
1815 		b = atop(hole_start);
1816 		uvm_page_physload(a, b, a, b, VM_FREELIST_DEFAULT);
1817 		c = atop(hole_start + hole_size);
1818 		d = atop(avail_end);
1819 		uvm_page_physload(b, d, c, d, VM_FREELIST_DEFAULT);
1820 	} else {
1821 		a = atop(avail_start);
1822 		d = atop(avail_end);
1823 		uvm_page_physload(a, d, a, d, VM_FREELIST_DEFAULT);
1824 	}
1825 }
1826 
1827 /*
1828  *	Initialize the pmap module.
1829  *	Called by vm_init, to initialize any structures that the pmap
1830  *	system needs to map virtual memory.
1831  */
1832 void
1833 pmap_init()
1834 {
1835 	pv_init();
1836 
1837 	/* Initialize the pmap pool. */
1838 	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
1839 	    &pool_allocator_nointr);
1840 }
1841 
1842 /*
1843  * Map a range of kernel virtual address space.
1844  * This might be used for device mappings, or to
1845  * record the mapping for kernel text/data/bss.
1846  * Return VA following the mapped range.
1847  */
1848 vaddr_t
1849 pmap_map(va, pa, endpa, prot)
1850 	vaddr_t	va;
1851 	paddr_t	pa;
1852 	paddr_t	endpa;
1853 	int		prot;
1854 {
1855 	int sz;
1856 
1857 	sz = endpa - pa;
1858 	do {
1859 		pmap_enter(kernel_pmap, va, pa, prot, 0);
1860 		va += NBPG;
1861 		pa += NBPG;
1862 		sz -= NBPG;
1863 	} while (sz > 0);
1864 	pmap_update(kernel_pmap);
1865 	return(va);
1866 }
1867 
1868 void
1869 pmap_user_init(pmap)
1870 	pmap_t pmap;
1871 {
1872 	int i;
1873 	pmap->pm_segmap = malloc(sizeof(char)*NUSEG, M_VMPMAP, M_WAITOK);
1874 	for (i = 0; i < NUSEG; i++) {
1875 		pmap->pm_segmap[i] = SEGINV;
1876 	}
1877 }
1878 
1879 /*
1880  *	Create and return a physical map.
1881  *
1882  *	If the size specified for the map
1883  *	The new map is an actual physical map and
1884  *	may be referenced by the hardware.
1891 pmap_t
1892 pmap_create()
1893 {
1894 	pmap_t pmap;
1895 
1896 	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
1897 	pmap_pinit(pmap);
1898 	return pmap;
1899 }
1900 
1901 /*
1902  * Release any resources held by the given physical map.
1903  * Called when a pmap initialized by pmap_pinit is being released.
1904  * Should only be called if the map contains no valid mappings.
1905  */
1906 void
1907 pmap_release(pmap)
1908 	struct pmap *pmap;
1909 {
1910 	int s;
1911 
1912 	s = splvm();
1913 
1914 	if (pmap == kernel_pmap)
1915 		panic("pmap_release: kernel_pmap!");
1916 
1917 	if (has_context(pmap)) {
1918 #ifdef	PMAP_DEBUG
1919 		if (pmap_debug & PMD_CONTEXT)
1920 			printf("pmap_release(%p): free ctx %d\n",
1921 				   pmap, pmap->pm_ctxnum);
1922 #endif
1923 		context_free(pmap);
1924 	}
1925 	free(pmap->pm_segmap, M_VMPMAP);
1926 	pmap->pm_segmap = NULL;
1927 
1928 	splx(s);
1929 }
1930 
1931 
1932 /*
1933  *	Retire the given physical map from service.
1934  *	Should only be called if the map contains
1935  *	no valid mappings.
1936  */
1937 void
1938 pmap_destroy(pmap)
1939 	pmap_t pmap;
1940 {
1941 	int count;
1942 
1943 #ifdef PMAP_DEBUG
1944 	if (pmap_debug & PMD_CREATE)
1945 		printf("pmap_destroy(%p)\n", pmap);
1946 #endif
1947 	if (pmap == kernel_pmap)
1948 		panic("pmap_destroy: kernel_pmap!");
1949 	pmap_lock(pmap);
1950 	count = pmap_del_ref(pmap);
1951 	pmap_unlock(pmap);
1952 	if (count == 0) {
1953 		pmap_release(pmap);
1954 		pool_put(&pmap_pmap_pool, pmap);
1955 	}
1956 }
1957 
1958 /*
1959  *	Add a reference to the specified pmap.
1960  */
1961 void
1962 pmap_reference(pmap)
1963 	pmap_t	pmap;
1964 {
1965 	pmap_lock(pmap);
1966 	pmap_add_ref(pmap);
1967 	pmap_unlock(pmap);
1968 }
1969 
1970 
1971 /*
1972  *	Insert the given physical page (p) at
1973  *	the specified virtual address (v) in the
1974  *	target physical map with the protection requested.
1975  *
1976  *	The physical address is page aligned, but may have some
1977  *	low bits set indicating an OBIO or VME bus page, or just
1978  *	that the non-cache bit should be set (i.e PMAP_NC).
1979  *
1980  *	If specified, the page will be wired down, meaning
1981  *	that the related pte can not be reclaimed.
1982  *
1983  *	NB:  This is the only routine which MAY NOT lazy-evaluate
1984  *	or lose information.  That is, this routine must actually
1985  *	insert this page into the given map NOW.
1986  */
1987 int
1988 pmap_enter(pmap, va, pa, prot, flags)
1989 	pmap_t pmap;
1990 	vaddr_t va;
1991 	paddr_t pa;
1992 	vm_prot_t prot;
1993 	int flags;
1994 {
1995 	int new_pte, s;
1996 	boolean_t wired = (flags & PMAP_WIRED) != 0;
1997 
1998 #ifdef	PMAP_DEBUG
1999 	if ((pmap_debug & PMD_ENTER) ||
2000 		(va == pmap_db_watchva))
2001 		printf("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n",
2002 			   pmap, va, pa, prot, wired);
2003 #endif
2004 
2005 	/* Get page-type bits from low part of the PA... */
2006 	new_pte = (pa & PMAP_SPEC) << PG_MOD_SHIFT;
2007 
2008 	/* ...now the valid and writable bits... */
2009 	new_pte |= PG_VALID;
2010 	if (prot & VM_PROT_WRITE)
2011 		new_pte |= PG_WRITE;
2012 
2013 	/* ...and finally the page-frame number. */
2014 	new_pte |= PA_PGNUM(pa);
2015 
2016 	/*
2017 	 * Treatment varies significantly:
2018 	 *  kernel PTEs are always in the MMU;
2019 	 *  user PTEs are not necessarily in the MMU, and the pmap itself
2020 	 *   may not be loaded in the MMU (i.e. may have no context) either.
2021 	 *
2022 	 */
2023 	s = splvm();
2024 	if (pmap == kernel_pmap) {
2025 		new_pte |= PG_SYSTEM;
2026 		pmap_enter_kernel(va, new_pte, wired);
2027 	} else {
2028 		pmap_enter_user(pmap, va, new_pte, wired);
2029 	}
2030 	splx(s);
2031 	return 0;
2032 }
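/*
 * Illustrative sketch of the PTE assembly done in pmap_enter() above:
 * the low "spec" bits of the PA select the page type, then the valid,
 * write and system bits are OR'd in, then the page-frame number.  The
 * shift amount and bit positions below are placeholders for this
 * example only, not the real <machine/pte.h> values.
 */
#if 0	/* illustrative sketch, not referenced by the pmap code */
#define EX_PGSHIFT	11		/* assumed page size of 2KB */
#define EX_PG_VALID	0x80000000U	/* placeholder bit positions */
#define EX_PG_WRITE	0x40000000U
#define EX_PG_SYSTEM	0x20000000U
#define EX_PG_FRAME	0x000fffffU

static unsigned int
ex_make_pte(unsigned long pa, int writable, int kernel)
{
	unsigned int pte = EX_PG_VALID | ((pa >> EX_PGSHIFT) & EX_PG_FRAME);

	if (writable)
		pte |= EX_PG_WRITE;
	if (kernel)
		pte |= EX_PG_SYSTEM;
	return (pte);
}
#endif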
2033 
2034 static void
2035 pmap_enter_kernel(pgva, new_pte, wired)
2036 	vaddr_t pgva;
2037 	int new_pte;
2038 	boolean_t wired;
2039 {
2040 	pmap_t pmap = kernel_pmap;
2041 	pmeg_t pmegp;
2042 	int do_pv, old_pte, sme;
2043 	vaddr_t segva;
2044 	int saved_ctx;
2045 
2046 	/*
2047 	 * Need to handle possibly allocating additional pmegs;
2048 	 * need to make sure they can't be stolen from the kernel;
2049 	 * map any new pmegs into context zero, make sure rest of pmeg is null;
2050 	 * deal with pv_stuff; possibly caching problems;
2051 	 * must also deal with changes too.
2052 	 */
2053 	saved_ctx = get_context();
2054 	set_context(KERNEL_CONTEXT);
2055 
2056 	/*
2057 	 * In detail:
2058 	 *
2059 	 * (a) lock pmap
2060 	 * (b) Is the VA in an already-mapped segment?  If so,
2061 	 *	 look to see if that VA is already "valid".  If it is, then
2062 	 *	 the action is a change to an existing pte
2063 	 * (c) if not mapped segment, need to allocate pmeg
2064 	 * (d) if adding pte entry or changing physaddr of existing one,
2065 	 *		use pv_stuff, for change, pmap_remove() possibly.
2066 	 * (e) change/add pte
2067 	 */
2068 
2069 #ifdef	DIAGNOSTIC
2070 	if ((pgva < virtual_avail) || (pgva >= DVMA_MAP_END))
2071 		panic("pmap_enter_kernel: bad va=0x%lx", pgva);
2072 	if ((new_pte & (PG_VALID | PG_SYSTEM)) != (PG_VALID | PG_SYSTEM))
2073 		panic("pmap_enter_kernel: bad pte");
2074 #endif
2075 
2076 	if (pgva >= DVMA_MAP_BASE) {
2077 		/* This is DVMA space.  Always want it non-cached. */
2078 		new_pte |= PG_NC;
2079 	}
2080 
2081 	segva = m68k_trunc_seg(pgva);
2082 	do_pv = TRUE;
2083 
2084 	/* Do we have a PMEG? */
2085 	sme = get_segmap(segva);
2086 	if (sme != SEGINV) {
2087 		/* Found a PMEG in the segmap.  Cool. */
2088 		pmegp = pmeg_p(sme);
2089 #ifdef	DIAGNOSTIC
2090 		/* Make sure it is the right PMEG. */
2091 		if (sme != pmap->pm_segmap[VA_SEGNUM(segva)])
2092 			panic("pmap_enter_kernel: wrong sme at VA=0x%lx", segva);
2093 		/* Make sure it is ours. */
2094 		if (pmegp->pmeg_owner != pmap)
2095 			panic("pmap_enter_kernel: MMU has bad pmeg 0x%x", sme);
2096 #endif
2097 	} else {
2098 		/* No PMEG in the segmap.  Have to allocate one. */
2099 		pmegp = pmeg_allocate(pmap, segva);
2100 		sme = pmegp->pmeg_index;
2101 		pmap->pm_segmap[VA_SEGNUM(segva)] = sme;
2102 		set_segmap(segva, sme);
2103 #ifdef	PMAP_DEBUG
2104 		pmeg_verify_empty(segva);
2105 		if (pmap_debug & PMD_SEGMAP) {
2106 			printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x (ek)\n",
2107 				   pmap, segva, sme);
2108 		}
2109 #endif
2110 		/* There are no existing mappings to deal with. */
2111 		old_pte = 0;
2112 		goto add_pte;
2113 	}
2114 
2115 	/*
2116 	 * We have a PMEG.  Is the VA already mapped to somewhere?
2117 	 *	(a) if so, is it same pa? (really a protection change)
2118 	 *	(b) if not same pa, then we have to unlink from old pa
2119 	 */
2120 	old_pte = get_pte(pgva);
2121 	if ((old_pte & PG_VALID) == 0)
2122 		goto add_pte;
2123 
2124 	/* Have valid translation.  Flush cache before changing it. */
2125 #ifdef	HAVECACHE
2126 	if (cache_size) {
2127 		cache_flush_page(pgva);
2128 		/* Get fresh mod/ref bits from write-back. */
2129 		old_pte = get_pte(pgva);
2130 	}
2131 #endif
2132 
2133 	/* XXX - removing valid page here, way lame... -glass */
2134 	pmegp->pmeg_vpages--;
2135 
2136 	if (!IS_MAIN_MEM(old_pte)) {
2137 		/* Was not main memory, so no pv_entry for it. */
2138 		goto add_pte;
2139 	}
2140 
2141 	/* Old mapping was main memory.  Save mod/ref bits. */
2142 	save_modref_bits(old_pte);
2143 
2144 	/*
2145 	 * If not changing the type or pfnum then re-use pv_entry.
2146 	 * Note we get here only with old_pte having PGT_OBMEM.
2147 	 */
2148 	if ((old_pte & (PG_TYPE|PG_FRAME)) == (new_pte & (PG_TYPE|PG_FRAME))) {
2149 		do_pv = FALSE;		/* re-use pv_entry */
2150 		new_pte |= (old_pte & PG_NC);
2151 		goto add_pte;
2152 	}
2153 
2154 	/* OK, different type or PA, have to kill old pv_entry. */
2155 	pv_unlink(pmap, old_pte, pgva);
2156 
2157  add_pte:	/* can be destructive */
2158 	pmeg_set_wiring(pmegp, pgva, wired);
2159 
2160 	/* Anything but MAIN_MEM is mapped non-cached. */
2161 	if (!IS_MAIN_MEM(new_pte)) {
2162 		new_pte |= PG_NC;
2163 		do_pv = FALSE;
2164 	}
2165 	if (do_pv == TRUE) {
2166 		if (pv_link(pmap, new_pte, pgva) & PV_NC)
2167 			new_pte |= PG_NC;
2168 	}
2169 #ifdef	PMAP_DEBUG
2170 	if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
2171 		printf("pmap: set_pte pmap=%p va=0x%lx old=0x%x new=0x%x (ek)\n",
2172 			   pmap, pgva, old_pte, new_pte);
2173 	}
2174 #endif
2175 	/* cache flush done above */
2176 	set_pte(pgva, new_pte);
2177 	set_context(saved_ctx);
2178 	pmegp->pmeg_vpages++;
2179 }
2180 
2181 
2182 static void
2183 pmap_enter_user(pmap, pgva, new_pte, wired)
2184 	pmap_t pmap;
2185 	vaddr_t pgva;
2186 	int new_pte;
2187 	boolean_t wired;
2188 {
2189 	int do_pv, old_pte, sme;
2190 	vaddr_t segva;
2191 	pmeg_t pmegp;
2192 
2193 #ifdef	DIAGNOSTIC
2194 	if (pgva >= VM_MAXUSER_ADDRESS)
2195 		panic("pmap_enter_user: bad va=0x%lx", pgva);
2196 	if ((new_pte & (PG_VALID | PG_SYSTEM)) != PG_VALID)
2197 		panic("pmap_enter_user: bad pte");
2198 #endif
2199 #ifdef	PMAP_DEBUG
2200 	/*
2201 	 * Some user pages are wired here, and a later
2202 	 * call to pmap_unwire() will unwire them.
2203 	 * XXX - Need a separate list for wired user pmegs
2204 	 * so they can not be stolen from the active list.
2205 	 * XXX - Note: vm_fault.c assumes pmap_extract will
2206 	 * work on wired mappings, so must preserve them...
2207 	 * XXX: Maybe keep a list of wired PMEGs?
2208 	 */
2209 	if (wired && (pmap_debug & PMD_WIRING)) {
2210 		db_printf("pmap_enter_user: attempt to wire user page, ignored\n");
2211 		Debugger();
2212 	}
2213 #endif
2214 
2215 	/* Validate this assumption. */
2216 	if (pmap != current_pmap()) {
2217 #ifdef	PMAP_DEBUG
2218 		/* Apparently, this never happens. */
2219 		db_printf("pmap_enter_user: not curproc\n");
2220 		Debugger();
2221 #endif
2222 		/* Just throw it out (fault it in later). */
2223 		/* XXX: But must remember it if wired... */
2224 		return;
2225 	}
2226 
2227 	segva = m68k_trunc_seg(pgva);
2228 	do_pv = TRUE;
2229 
2230 	/*
2231 	 * If this pmap was sharing the "empty" context,
2232 	 * allocate a real context for its exclusive use.
2233 	 */
2234 	if (!has_context(pmap)) {
2235 		context_allocate(pmap);
2236 #ifdef PMAP_DEBUG
2237 		if (pmap_debug & PMD_CONTEXT)
2238 			printf("pmap_enter(%p) got context %d\n",
2239 				   pmap, pmap->pm_ctxnum);
2240 #endif
2241 		set_context(pmap->pm_ctxnum);
2242 	} else {
2243 #ifdef	PMAP_DEBUG
2244 		/* Make sure context is correct. */
2245 		if (pmap->pm_ctxnum != get_context()) {
2246 			db_printf("pmap_enter_user: wrong context\n");
2247 			Debugger();
2248 			/* XXX: OK to proceed? */
2249 			set_context(pmap->pm_ctxnum);
2250 		}
2251 #endif
2252 	}
2253 
2254 	/*
2255 	 * We have a context.  Do we have a PMEG?
2256 	 */
2257 	sme = get_segmap(segva);
2258 	if (sme != SEGINV) {
2259 		/* Found a PMEG in the segmap.  Cool. */
2260 		pmegp = pmeg_p(sme);
2261 #ifdef	DIAGNOSTIC
2262 		/* Make sure it is the right PMEG. */
2263 		if (sme != pmap->pm_segmap[VA_SEGNUM(segva)])
2264 			panic("pmap_enter_user: wrong sme at VA=0x%lx", segva);
2265 		/* Make sure it is ours. */
2266 		if (pmegp->pmeg_owner != pmap)
2267 			panic("pmap_enter_user: MMU has bad pmeg 0x%x", sme);
2268 #endif
2269 	} else {
2270 		/* Not in the segmap.  Try the S/W cache. */
2271 		pmegp = pmeg_cache(pmap, segva);
2272 		if (pmegp) {
2273 			/* Found PMEG in cache.  Just reload it. */
2274 			sme = pmegp->pmeg_index;
2275 			set_segmap(segva, sme);
2276 		} else {
2277 			/* PMEG not in cache, so allocate one. */
2278 			pmegp = pmeg_allocate(pmap, segva);
2279 			sme = pmegp->pmeg_index;
2280 			pmap->pm_segmap[VA_SEGNUM(segva)] = sme;
2281 			set_segmap(segva, sme);
2282 #ifdef	PMAP_DEBUG
2283 			pmeg_verify_empty(segva);
2284 #endif
2285 		}
2286 #ifdef	PMAP_DEBUG
2287 		if (pmap_debug & PMD_SEGMAP) {
2288 			printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x (eu)\n",
2289 				   pmap, segva, sme);
2290 		}
2291 #endif
2292 	}
2293 
2294 	/*
2295 	 * We have a PMEG.  Is the VA already mapped to somewhere?
2296 	 *	(a) if so, is it same pa? (really a protection change)
2297 	 *	(b) if not same pa, then we have to unlink from old pa
2298 	 */
2299 	old_pte = get_pte(pgva);
2300 	if ((old_pte & PG_VALID) == 0)
2301 		goto add_pte;
2302 
2303 	/* Have valid translation.  Flush cache before changing it. */
2304 #ifdef	HAVECACHE
2305 	if (cache_size) {
2306 		cache_flush_page(pgva);
2307 		/* Get fresh mod/ref bits from write-back. */
2308 		old_pte = get_pte(pgva);
2309 	}
2310 #endif
2311 
2312 	/* XXX - removing valid page here, way lame... -glass */
2313 	pmegp->pmeg_vpages--;
2314 
2315 	if (!IS_MAIN_MEM(old_pte)) {
2316 		/* Was not main memory, so no pv_entry for it. */
2317 		goto add_pte;
2318 	}
2319 
2320 	/* Old mapping was main memory.  Save mod/ref bits. */
2321 	save_modref_bits(old_pte);
2322 
2323 	/*
2324 	 * If not changing the type or pfnum then re-use pv_entry.
2325 	 * Note we get here only with old_pte having PGT_OBMEM.
2326 	 */
2327 	if ((old_pte & (PG_TYPE|PG_FRAME)) == (new_pte & (PG_TYPE|PG_FRAME))) {
2328 		do_pv = FALSE;		/* re-use pv_entry */
2329 		new_pte |= (old_pte & PG_NC);
2330 		goto add_pte;
2331 	}
2332 
2333 	/* OK, different type or PA, have to kill old pv_entry. */
2334 	pv_unlink(pmap, old_pte, pgva);
2335 
2336  add_pte:
2337 	/* XXX - Wiring changes on user pmaps? */
2338 	/* pmeg_set_wiring(pmegp, pgva, wired); */
2339 
2340 	/* Anything but MAIN_MEM is mapped non-cached. */
2341 	if (!IS_MAIN_MEM(new_pte)) {
2342 		new_pte |= PG_NC;
2343 		do_pv = FALSE;
2344 	}
2345 	if (do_pv == TRUE) {
2346 		if (pv_link(pmap, new_pte, pgva) & PV_NC)
2347 			new_pte |= PG_NC;
2348 	}
2349 #ifdef	PMAP_DEBUG
2350 	if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
2351 		printf("pmap: set_pte pmap=%p va=0x%lx old=0x%x new=0x%x (eu)\n",
2352 			   pmap, pgva, old_pte, new_pte);
2353 	}
2354 #endif
2355 	/* cache flush done above */
2356 	set_pte(pgva, new_pte);
2357 	pmegp->pmeg_vpages++;
2358 }
2359 
2360 void
2361 pmap_kenter_pa(va, pa, prot)
2362 	vaddr_t va;
2363 	paddr_t pa;
2364 	vm_prot_t prot;
2365 {
2366 	int new_pte, s;
2367 	pmap_t pmap = kernel_pmap;
2368 	pmeg_t pmegp;
2369 	int sme;
2370 	vaddr_t segva;
2371 	int saved_ctx;
2372 
2373 #ifdef	PMAP_DEBUG
2374 	if ((pmap_debug & PMD_ENTER) ||
2375 		(va == pmap_db_watchva))
2376 		printf("pmap_kenter_pa(0x%lx, 0x%lx, 0x%x)\n",
2377 			   va, pa, prot);
2378 #endif
2379 
2380 	/* Get page-type bits from low part of the PA... */
2381 	new_pte = (pa & PMAP_SPEC) << PG_MOD_SHIFT;
2382 
2383 	/* ...now the valid and writable bits... */
2384 	new_pte |= PG_SYSTEM|PG_VALID;
2385 	if (prot & VM_PROT_WRITE)
2386 		new_pte |= PG_WRITE;
2387 
2388 	/* ...and finally the page-frame number. */
2389 	new_pte |= PA_PGNUM(pa);
2390 
2391 	/*
2392 	 * Need to handle possibly allocating additional pmegs;
2393 	 * need to make sure they can't be stolen from the kernel;
2394 	 * map any new pmegs into context zero, make sure rest of pmeg is null;
2395 	 * deal with pv_stuff; possibly caching problems;
2396 	 * must also deal with changes too.
2397 	 */
2398 	saved_ctx = get_context();
2399 	set_context(KERNEL_CONTEXT);
2400 
2401 	/*
2402 	 * In detail:
2403 	 *
2404 	 * (a) lock pmap
2405 	 * (b) Is the VA in an already-mapped segment?  If so,
2406 	 *	 look to see if that VA is already "valid".  If it is, then
2407 	 *	 the action is a change to an existing pte
2408 	 * (c) if not mapped segment, need to allocate pmeg
2409 	 * (d) change/add pte
2410 	 */
2411 
2412 #ifdef	DIAGNOSTIC
2413 	if ((va < virtual_avail) || (va >= DVMA_MAP_END))
2414 		panic("pmap_kenter_pa: bad va=0x%lx", va);
2415 #endif
2416 
2417 	if (va >= DVMA_MAP_BASE) {
2418 		/* This is DVMA space.  Always want it non-cached. */
2419 		new_pte |= PG_NC;
2420 	}
2421 
2422 	segva = m68k_trunc_seg(va);
2423 
2424 	s = splvm();
2425 
2426 	/* Do we have a PMEG? */
2427 	sme = get_segmap(segva);
2428 	if (sme != SEGINV) {
2429 		KASSERT((get_pte(va) & PG_VALID) == 0);
2430 
2431 		/* Found a PMEG in the segmap.  Cool. */
2432 		pmegp = pmeg_p(sme);
2433 #ifdef	DIAGNOSTIC
2434 		/* Make sure it is the right PMEG. */
2435 		if (sme != pmap->pm_segmap[VA_SEGNUM(segva)])
2436 			panic("pmap_kenter_pa: wrong sme at VA=0x%lx", segva);
2437 		/* Make sure it is ours. */
2438 		if (pmegp->pmeg_owner != pmap)
2439 			panic("pmap_kenter_pa: MMU has bad pmeg 0x%x", sme);
2440 #endif
2441 	} else {
2442 
2443 		/* No PMEG in the segmap.  Have to allocate one. */
2444 		pmegp = pmeg_allocate(pmap, segva);
2445 		sme = pmegp->pmeg_index;
2446 		pmap->pm_segmap[VA_SEGNUM(segva)] = sme;
2447 		set_segmap(segva, sme);
2448 #ifdef	PMAP_DEBUG
2449 		pmeg_verify_empty(segva);
2450 		if (pmap_debug & PMD_SEGMAP) {
2451 			printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x (ek)\n",
2452 				   pmap, segva, sme);
2453 		}
2454 #endif
2455 	}
2456 
2457 	pmeg_set_wiring(pmegp, va, TRUE);
2458 
2459 	/* Anything but MAIN_MEM is mapped non-cached. */
2460 	if (!IS_MAIN_MEM(new_pte)) {
2461 		new_pte |= PG_NC;
2462 	}
2463 #ifdef	PMAP_DEBUG
2464 	if ((pmap_debug & PMD_SETPTE) || (va == pmap_db_watchva)) {
2465 		printf("pmap: set_pte pmap=%p va=0x%lx new=0x%x (ek)\n",
2466 			   pmap, va, new_pte);
2467 	}
2468 #endif
2469 	/* No previous valid mapping here, so no cache flush is needed. */
2470 	set_pte(va, new_pte);
2471 	set_context(saved_ctx);
2472 	pmegp->pmeg_vpages++;
2473 	splx(s);
2474 }
2475 
2476 void
2477 pmap_kremove(va, len)
2478 	vaddr_t va;
2479 	vsize_t len;
2480 {
2481 	pmap_t pmap = kernel_pmap;
2482 	vaddr_t eva, neva, pgva, segva, segnum;
2483 	int pte, sme;
2484 	pmeg_t pmegp;
2485 #ifdef	HAVECACHE
2486 	int flush_by_page = 0;
2487 #endif
2488 	int s;
2489 	int saved_ctx;
2490 
2491 	s = splvm();
2492 	saved_ctx = get_context();
2493 	set_context(KERNEL_CONTEXT);
2494 	segnum = VA_SEGNUM(va);
2495 	for (eva = va + len; va < eva; va = neva, segnum++) {
2496 		neva = m68k_trunc_seg(va) + NBSG;
2497 		if (neva > eva) {
2498 			neva = eva;
2499 		}
2500 		if (pmap->pm_segmap[segnum] == SEGINV) {
2501 			continue;
2502 		}
2503 
2504 		segva = m68k_trunc_seg(va);
2505 		sme = get_segmap(segva);
2506 		pmegp = pmeg_p(sme);
2507 
2508 #ifdef	HAVECACHE
2509 		if (cache_size) {
2510 
2511 		    /*
2512 			/*
2513 			 * it will be cheaper to flush this segment entirely.
2514 			 */
2515 
2516 			if (cache_size < (eva - va)) {
2517 				/* cheaper to flush whole segment */
2518 				cache_flush_segment(segva);
2519 			} else {
2520 				flush_by_page = 1;
2521 			}
2522 		}
2523 #endif
2524 
2525 		/* Invalidate the PTEs in the given range. */
2526 		for (pgva = va; pgva < neva; pgva += NBPG) {
2527 			pte = get_pte(pgva);
2528 			if (pte & PG_VALID) {
2529 #ifdef	HAVECACHE
2530 				if (flush_by_page) {
2531 					cache_flush_page(pgva);
2532 					/* Get fresh mod/ref bits from write-back. */
2533 					pte = get_pte(pgva);
2534 				}
2535 #endif
2536 #ifdef	PMAP_DEBUG
2537 				if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
2538 					printf("pmap: set_pte pmap=%p va=0x%lx"
2539 					   " old=0x%x new=0x%x (rrmmu)\n",
2540 					   pmap, pgva, pte, PG_INVAL);
2541 				}
2542 #endif
2543 				set_pte(pgva, PG_INVAL);
2544 				KASSERT(pmegp->pmeg_vpages > 0);
2545 				pmegp->pmeg_vpages--;
2546 			}
2547 		}
2548 		KASSERT(pmegp->pmeg_vpages >= 0);
2549 		if (pmegp->pmeg_vpages == 0) {
2550 			/* We are done with this pmeg. */
2551 #ifdef	PMAP_DEBUG
2552 			if (is_pmeg_wired(pmegp)) {
2553 				if (pmap_debug & PMD_WIRING) {
2554 					db_printf("pmap: removing wired pmeg: %p\n", pmegp);
2555 					Debugger();
2556 				}
2557 			}
2558 			if (pmap_debug & PMD_SEGMAP) {
2559 				printf("pmap: set_segmap ctx=%d v=0x%lx old=0x%x new=ff (rm)\n",
2560 					pmap->pm_ctxnum, segva, pmegp->pmeg_index);
2561 			}
2562 			pmeg_verify_empty(segva);
2563 #endif
2564 
2565 			/* Remove it from the MMU. */
2566 			set_segmap(segva, SEGINV);
2567 			pmap->pm_segmap[VA_SEGNUM(segva)] = SEGINV;
2568 
2569 			/* Now, put it on the free list. */
2570 			pmeg_free(pmegp);
2571 		}
2572 	}
2573 	set_context(saved_ctx);
2574 	splx(s);
2575 }
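/*
 * Illustrative sketch of the segment-by-segment walk used by
 * pmap_kremove() above (and by pmap_protect()/pmap_remove() below):
 * each step is clipped to the end of the current segment so the
 * per-segment helpers never see a range that crosses a PMEG.  The
 * segment size below is an assumption for this example only.
 */
#if 0	/* illustrative sketch, not referenced by the pmap code */
#define EX_NBSG	0x8000UL	/* assumed segment size */

static void
ex_walk_segments(unsigned long va, unsigned long eva,
	void (*per_seg)(unsigned long, unsigned long))
{
	unsigned long neva;

	while (va < eva) {
		/* next segment boundary, clipped to the end of the range */
		neva = (va & ~(EX_NBSG - 1)) + EX_NBSG;
		if (neva > eva)
			neva = eva;
		(*per_seg)(va, neva);
		va = neva;
	}
}
#endif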
2576 
2577 
2578 /*
2579  * The trap handler calls this so we can try to resolve
2580  * user-level faults by reloading a PMEG.
2581  * If that does not produce a valid mapping,
2582  * call uvm_fault() as usual.
2583  *
2584  * XXX: Merge this with the next function?
2585  */
2586 int
2587 _pmap_fault(map, va, ftype)
2588 	struct vm_map *map;
2589 	vaddr_t va;
2590 	vm_prot_t ftype;
2591 {
2592 	pmap_t pmap;
2593 	int rv;
2594 
2595 	pmap = vm_map_pmap(map);
2596 	if (map == kernel_map) {
2597 		/* Do not allow faults below the "managed" space. */
2598 		if (va < virtual_avail) {
2599 			/*
2600 			 * Most pages below virtual_avail are read-only,
2601 			 * so I will assume it is a protection failure.
2602 			 */
2603 			return EACCES;
2604 		}
2605 	} else {
2606 		/* User map.  Try reload shortcut. */
2607 		if (pmap_fault_reload(pmap, va, ftype))
2608 			return 0;
2609 	}
2610 	rv = uvm_fault(map, va, 0, ftype);
2611 
2612 #ifdef	PMAP_DEBUG
2613 	if (pmap_debug & PMD_FAULT) {
2614 		printf("pmap_fault(%p, 0x%lx, 0x%x) -> 0x%x\n",
2615 			   map, va, ftype, rv);
2616 	}
2617 #endif
2618 
2619 	return (rv);
2620 }
2621 
2622 /*
2623  * This is a shortcut used by the trap handler to
2624  * reload PMEGs into a user segmap without calling
2625  * the actual VM fault handler.  Returns nonzero if:
2626  *	the PMEG was reloaded, and
2627  *	it has a valid PTE at va.
2628  * Otherwise returns zero and lets the VM code handle the fault.
2629  */
2630 int
2631 pmap_fault_reload(pmap, pgva, ftype)
2632 	pmap_t pmap;
2633 	vaddr_t pgva;
2634 	vm_prot_t ftype;
2635 {
2636 	int rv, s, pte, chkpte, sme;
2637 	vaddr_t segva;
2638 	pmeg_t pmegp;
2639 
2640 	if (pgva >= VM_MAXUSER_ADDRESS)
2641 		return (0);
2642 	if (pmap->pm_segmap == NULL) {
2643 #ifdef	PMAP_DEBUG
2644 		db_printf("pmap_fault_reload: null segmap\n");
2645 		Debugger();
2646 #endif
2647 		return (0);
2648 	}
2649 
2650 	/* Short-cut using the S/W segmap. */
2651 	if (pmap->pm_segmap[VA_SEGNUM(pgva)] == SEGINV)
2652 		return (0);
2653 
2654 	segva = m68k_trunc_seg(pgva);
2655 	chkpte = PG_VALID;
2656 	if (ftype & VM_PROT_WRITE)
2657 		chkpte |= PG_WRITE;
2658 	rv = 0;
2659 
2660 	s = splvm();
2661 
2662 	/*
2663 	 * Given that we faulted on a user-space address, we will
2664 	 * probably need a context.  Get a context now so we can
2665 	 * try to resolve the fault with a segmap reload.
2666 	 */
2667 	if (!has_context(pmap)) {
2668 		context_allocate(pmap);
2669 #ifdef PMAP_DEBUG
2670 		if (pmap_debug & PMD_CONTEXT)
2671 			printf("pmap_fault(%p) got context %d\n",
2672 				   pmap, pmap->pm_ctxnum);
2673 #endif
2674 		set_context(pmap->pm_ctxnum);
2675 	} else {
2676 #ifdef	PMAP_DEBUG
2677 		/* Make sure context is correct. */
2678 		if (pmap->pm_ctxnum != get_context()) {
2679 			db_printf("pmap_fault_reload: wrong context\n");
2680 			Debugger();
2681 			/* XXX: OK to proceed? */
2682 			set_context(pmap->pm_ctxnum);
2683 		}
2684 #endif
2685 	}
2686 
2687 	sme = get_segmap(segva);
2688 	if (sme == SEGINV) {
2689 		/* See if there is something to reload. */
2690 		pmegp = pmeg_cache(pmap, segva);
2691 		if (pmegp) {
2692 			/* Found one!  OK, reload it. */
2693 			pmap_stats.ps_pmeg_faultin++;
2694 			sme = pmegp->pmeg_index;
2695 			set_segmap(segva, sme);
2696 			pte = get_pte(pgva);
2697 			if (pte & chkpte)
2698 				rv = 1;
2699 		}
2700 	}
2701 
2702 	splx(s);
2703 	return (rv);
2704 }
2705 
2706 
2707 /*
2708  * Clear the modify bit for the given physical page.
2709  */
2710 boolean_t
2711 pmap_clear_modify(pg)
2712 	struct vm_page *pg;
2713 {
2714 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
2715 	pv_entry_t *head;
2716 	u_char *pv_flags;
2717 	int s;
2718 	boolean_t rv;
2719 
2720 	pv_flags = pa_to_pvflags(pa);
2721 	head     = pa_to_pvhead(pa);
2722 
2723 	s = splvm();
2724 	*pv_flags |= pv_syncflags(*head);
2725 	rv = *pv_flags & PV_MOD;
2726 	*pv_flags &= ~PV_MOD;
2727 	splx(s);
2728 	return rv;
2729 }
2730 
2731 /*
2732  * Tell whether the given physical page has been modified.
2733  */
2734 boolean_t
2735 pmap_is_modified(pg)
2736 	struct vm_page *pg;
2737 {
2738 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
2739 	pv_entry_t *head;
2740 	u_char *pv_flags;
2741 	int s;
2742 	boolean_t rv;
2743 
2744 	pv_flags = pa_to_pvflags(pa);
2745 	head     = pa_to_pvhead(pa);
2746 
2747 	s = splvm();
2748 	if ((*pv_flags & PV_MOD) == 0)
2749 		*pv_flags |= pv_syncflags(*head);
2750 	rv = (*pv_flags & PV_MOD);
2751 	splx(s);
2752 	return (rv);
2753 }
2754 
2755 /*
2756  * Clear the reference bit for the given physical page.
2757  * It's OK to just remove mappings if that's easier.
2758  */
2759 boolean_t
2760 pmap_clear_reference(pg)
2761 	struct vm_page *pg;
2762 {
2763 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
2764 	pv_entry_t *head;
2765 	u_char *pv_flags;
2766 	int s;
2767 	boolean_t rv;
2768 
2769 	pv_flags = pa_to_pvflags(pa);
2770 	head     = pa_to_pvhead(pa);
2771 
2772 	s = splvm();
2773 	*pv_flags |= pv_syncflags(*head);
2774 	rv = *pv_flags & PV_REF;
2775 	*pv_flags &= ~PV_REF;
2776 	splx(s);
2777 	return rv;
2778 }
2779 
2780 /*
2781  * Tell whether the given physical page has been referenced.
2782  * It's OK to just return FALSE if page is not mapped.
2783  */
2784 boolean_t
2785 pmap_is_referenced(pg)
2786 	struct vm_page *pg;
2787 {
2788 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
2789 	pv_entry_t *head;
2790 	u_char *pv_flags;
2791 	int s;
2792 	boolean_t rv;
2793 
2794 	pv_flags = pa_to_pvflags(pa);
2795 	head     = pa_to_pvhead(pa);
2796 
2797 	s = splvm();
2798 	if ((*pv_flags & PV_REF) == 0)
2799 		*pv_flags |= pv_syncflags(*head);
2800 	rv = (*pv_flags & PV_REF);
2801 	splx(s);
2802 	return (rv);
2803 }
2804 
2805 
2806 /*
2807  * This is called by locore.s:cpu_switch() when it is
2808  * switching to a new process.  Load new translations.
2809  */
2810 void
2811 _pmap_switch(pmap)
2812 	pmap_t pmap;
2813 {
2814 
2815 	/*
2816 	 * Since we maintain completely separate user and kernel address
2817 	 * spaces, whenever we switch to a process, we need to make sure
2818 	 * that it has a context allocated.
2819 	 */
2820 	if (!has_context(pmap)) {
2821 		context_allocate(pmap);
2822 #ifdef PMAP_DEBUG
2823 		if (pmap_debug & PMD_CONTEXT)
2824 			printf("_pmap_switch(%p) got context %d\n",
2825 				   pmap, pmap->pm_ctxnum);
2826 #endif
2827 	}
2828 	set_context(pmap->pm_ctxnum);
2829 }
2830 
2831 /*
2832  * Exported version of pmap_activate().  This is called from the
2833  * machine-independent VM code when a process is given a new pmap.
2834  * If (p == curproc) do like cpu_switch would do; otherwise just
2835  * take this as notification that the process has a new pmap.
2836  */
2837 void
2838 pmap_activate(p)
2839 	struct proc *p;
2840 {
2841 	pmap_t pmap = p->p_vmspace->vm_map.pmap;
2842 
2843 	if (p == curproc) {
2844 		_pmap_switch(pmap);
2845 	}
2846 }
2847 
2848 /*
2849  * Deactivate the address space of the specified process.
2850  */
2851 void
2852 pmap_deactivate(p)
2853 	struct proc *p;
2854 {
2855 	/* Nothing to do. */
2856 }
2857 
2858 /*
2859  *	Routine:	pmap_unwire
2860  *	Function:	Clear the wired attribute for a map/virtual-address
2861  *			pair.
2862  *	In/out conditions:
2863  *			The mapping must already exist in the pmap.
2864  */
2865 void
2866 pmap_unwire(pmap, va)
2867 	pmap_t	pmap;
2868 	vaddr_t	va;
2869 {
2870 	int s, sme;
2871 	int wiremask, ptenum;
2872 	pmeg_t pmegp;
2873 	int saved_ctx;
2874 
2875 #ifdef PMAP_DEBUG
2876 	if (pmap_debug & PMD_WIRING)
2877 		printf("pmap_unwire(pmap=%p, va=0x%lx)\n",
2878 			   pmap, va);
2879 #endif
2880 	/*
2881 	 * We are asked to unwire pages that were wired when
2882 	 * pmap_enter() was called and we ignored wiring.
2883 	 * (VM code appears to wire a stack page during fork.)
2884 	 */
2885 	if (pmap != kernel_pmap) {
2886 #ifdef PMAP_DEBUG
2887 		if (pmap_debug & PMD_WIRING) {
2888 			db_printf("  (user pmap -- ignored)\n");
2889 			Debugger();
2890 		}
2891 #endif
2892 		return;
2893 	}
2894 
2895 	ptenum = VA_PTE_NUM(va);
2896 	wiremask = 1 << ptenum;
2897 
2898 	s = splvm();
2899 	saved_ctx = get_context();
2900 	set_context(KERNEL_CONTEXT);
2901 	sme = get_segmap(va);
2902 	set_context(saved_ctx);
2903 	pmegp = pmeg_p(sme);
2904 	pmegp->pmeg_wired &= ~wiremask;
2905 	splx(s);
2906 }
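/*
 * Illustrative sketch of the per-PMEG wired bitmask that pmap_unwire()
 * clears above: a 16-bit mask with one bit per page in the segment,
 * indexed by the page-within-segment number, so wiring state is just a
 * shift and mask.
 */
#if 0	/* illustrative sketch, not referenced by the pmap code */
static void
ex_clear_wired(unsigned short *wired, int ptenum)
{
	*wired &= ~(1 << ptenum);	/* what pmap_unwire() does */
}

static int
ex_is_wired(unsigned short wired, int ptenum)
{
	return ((wired >> ptenum) & 1);
}
#endif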
2907 
2908 /*
2909  *	Copy the range specified by src_addr/len
2910  *	from the source map to the range dst_addr/len
2911  *	in the destination map.
2912  *
2913  *	This routine is only advisory and need not do anything.
2914  */
2915 void
2916 pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
2917 	pmap_t		dst_pmap;
2918 	pmap_t		src_pmap;
2919 	vaddr_t		dst_addr;
2920 	vsize_t		len;
2921 	vaddr_t		src_addr;
2922 {
2923 }
2924 
2925 /*
2926  * This extracts the PMEG associated with the given map/virtual
2927  * address pair.  Returns SEGINV if VA not valid.
2928  */
2929 int
2930 _pmap_extract_pmeg(pmap, va)
2931 		pmap_t	pmap;
2932 		vaddr_t va;
2933 {
2934 		int s, saved_ctx, segnum, sme;
2935 
2936 		s = splvm();
2937 
2938 		if (pmap == kernel_pmap) {
2939 				saved_ctx = get_context();
2940 				set_context(KERNEL_CONTEXT);
2941 				sme = get_segmap(va);
2942 				set_context(saved_ctx);
2943 		} else {
2944 				/* This is rare, so do it the easy way. */
2945 				segnum = VA_SEGNUM(va);
2946 				sme = pmap->pm_segmap[segnum];
2947 		}
2948 
2949 		splx(s);
2950 		return (sme);
2951 }
2952 
2953 /*
2954  *	Routine:	pmap_extract
2955  *	Function:
2956  *		Extract the physical page address associated
2957  *		with the given map/virtual_address pair.
2958  *	Returns zero if VA not valid.
2959  */
2960 boolean_t
2961 pmap_extract(pmap, va, pap)
2962 	pmap_t	pmap;
2963 	vaddr_t va;
2964 	paddr_t *pap;
2965 {
2966 	int s, sme, segnum, ptenum, pte;
2967 	paddr_t pa;
2968 	int saved_ctx;
2969 
2970 	pte = 0;
2971 	s = splvm();
2972 	if (pmap == kernel_pmap) {
2973 		saved_ctx = get_context();
2974 		set_context(KERNEL_CONTEXT);
2975 		sme = get_segmap(va);
2976 		if (sme != SEGINV)
2977 			pte = get_pte(va);
2978 		set_context(saved_ctx);
2979 	} else {
2980 		/* This is rare, so do it the easy way. */
2981 		segnum = VA_SEGNUM(va);
2982 		sme = pmap->pm_segmap[segnum];
2983 		if (sme != SEGINV) {
2984 			ptenum = VA_PTE_NUM(va);
2985 			pte = get_pte_pmeg(sme, ptenum);
2986 		}
2987 	}
2988 	splx(s);
2989 
2990 	if ((pte & PG_VALID) == 0) {
2991 #ifdef PMAP_DEBUG
2992 		db_printf("pmap_extract: invalid va=0x%lx\n", va);
2993 		Debugger();
2994 #endif
2995 		return (FALSE);
2996 	}
2997 	pa = PG_PA(pte);
2998 #ifdef	DIAGNOSTIC
2999 	if (pte & PG_TYPE) {
3000 		panic("pmap_extract: not main mem, va=0x%lx", va);
3001 	}
3002 #endif
3003 	if (pap != NULL)
3004 		*pap = pa;
3005 	return (TRUE);
3006 }
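/*
 * Illustrative sketch of the PTE-to-PA step in pmap_extract() above:
 * the page-frame number is masked out of the PTE and shifted back up
 * to a byte address.  The field positions are placeholders for this
 * example, not the real <machine/pte.h> values.
 */
#if 0	/* illustrative sketch, not referenced by the pmap code */
#define EX_PGSHIFT	11		/* assumed page size of 2KB */
#define EX_PG_FRAME	0x000fffffU	/* assumed frame-number field */

static unsigned long
ex_pte_to_pa(unsigned int pte)
{
	return ((unsigned long)(pte & EX_PG_FRAME) << EX_PGSHIFT);
}
#endif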
3007 
3008 
3009 /*
3010  *	  pmap_page_protect:
3011  *
3012  *	  Lower the permission for all mappings to a given page.
3013  */
3014 void
3015 pmap_page_protect(pg, prot)
3016 	struct vm_page *pg;
3017 	vm_prot_t	   prot;
3018 {
3019 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
3020 	int s;
3021 
3022 	s = splvm();
3023 #ifdef PMAP_DEBUG
3024 	if (pmap_debug & PMD_PROTECT)
3025 		printf("pmap_page_protect(0x%lx, 0x%lx)\n", pa, prot);
3026 #endif
3027 	switch (prot) {
3028 	case VM_PROT_ALL:
3029 		break;
3030 	case VM_PROT_READ:
3031 	case VM_PROT_READ|VM_PROT_EXECUTE:
3032 		pv_changepte(pa, 0, PG_WRITE);
3033 		break;
3034 	default:
3035 		/* remove mapping for all pmaps that have it */
3036 		pv_remove_all(pa);
3037 		break;
3038 	}
3039 	splx(s);
3040 }
3041 
3042 /*
3043  * Initialize a preallocated and zeroed pmap structure,
3044  * such as one in a vmspace structure.
3045  */
3046 void
3047 pmap_pinit(pmap)
3048 	pmap_t pmap;
3049 {
3050 	pmap_common_init(pmap);
3051 	pmap_user_init(pmap);
3052 }
3053 
3054 /*
3055  *	Reduce the permissions on the specified
3056  *	range of this map as requested.
3057  *	(Make pages read-only.)
3058  */
3059 void
3060 pmap_protect(pmap, sva, eva, prot)
3061 	pmap_t pmap;
3062 	vaddr_t sva, eva;
3063 	vm_prot_t	prot;
3064 {
3065 	vaddr_t va, neva;
3066 	int segnum;
3067 
3068 	/* If leaving writable, nothing to do. */
3069 	if (prot & VM_PROT_WRITE)
3070 		return;
3071 
3072 	/* If removing all permissions, just unmap. */
3073 	if ((prot & VM_PROT_READ) == 0) {
3074 		pmap_remove(pmap, sva, eva);
3075 		return;
3076 	}
3077 
3078 #ifdef	PMAP_DEBUG
3079 	if ((pmap_debug & PMD_PROTECT) ||
3080 		((sva <= pmap_db_watchva && eva > pmap_db_watchva)))
3081 		printf("pmap_protect(%p, 0x%lx, 0x%lx)\n", pmap, sva, eva);
3082 #endif
3083 
3084 	KASSERT((pmap == kernel_pmap) ?
3085 			sva >= virtual_avail && eva < DVMA_MAP_END :
3086 			eva <= VM_MAXUSER_ADDRESS);
3087 	va = sva;
3088 	segnum = VA_SEGNUM(va);
3089 	while (va < eva) {
3090 		neva = m68k_trunc_seg(va) + NBSG;
3091 		if (neva > eva)
3092 			neva = eva;
3093 		if (pmap->pm_segmap[segnum] != SEGINV)
3094 			pmap_protect1(pmap, va, neva);
3095 		va = neva;
3096 		segnum++;
3097 	}
3098 }
3099 
3100 /*
3101  * Remove write permissions in the given range
3102  * (guaranteed to be within one segment).
3103  * Similar to pmap_remove1().
3104  */
3105 void
3106 pmap_protect1(pmap, sva, eva)
3107 	pmap_t pmap;
3108 	vaddr_t sva, eva;
3109 {
3110 	int old_ctx, s, sme;
3111 	boolean_t in_ctx;
3112 
3113 	s = splvm();
3114 
3115 #ifdef	DIAGNOSTIC
3116 	if (m68k_trunc_seg(sva) != m68k_trunc_seg(eva-1))
3117 		panic("pmap_protect1: bad range!");
3118 #endif
3119 
3120 	if (pmap == kernel_pmap) {
3121 		old_ctx = get_context();
3122 		set_context(KERNEL_CONTEXT);
3123 		sme = get_segmap(sva);
3124 		if (sme != SEGINV)
3125 			pmap_protect_mmu(pmap, sva, eva);
3126 		set_context(old_ctx);
3127 		goto out;
3128 	}
3129 	/* It is a user pmap. */
3130 
3131 	/* There is a PMEG, but maybe not active. */
3132 	old_ctx = INVALID_CONTEXT;
3133 	in_ctx = FALSE;
3134 	if (has_context(pmap)) {
3135 		/* Temporary context change. */
3136 		old_ctx = get_context();
3137 		set_context(pmap->pm_ctxnum);
3138 		sme = get_segmap(sva);
3139 		if (sme != SEGINV)
3140 			in_ctx = TRUE;
3141 	}
3142 
3143 	if (in_ctx == TRUE)
3144 		pmap_protect_mmu(pmap, sva, eva);
3145 	else
3146 		pmap_protect_noctx(pmap, sva, eva);
3147 
3148 	if (old_ctx != INVALID_CONTEXT) {
3149 		/* Restore previous context. */
3150 		set_context(old_ctx);
3151 	}
3152 
3153 out:
3154 	splx(s);
3155 }
3156 
3157 /*
3158  * Remove write permissions, all in one PMEG,
3159  * where that PMEG is currently in the MMU.
3160  * The current context is already correct.
3161  */
3162 void
3163 pmap_protect_mmu(pmap, sva, eva)
3164 	pmap_t pmap;
3165 	vaddr_t sva, eva;
3166 {
3167 	pmeg_t pmegp;
3168 	vaddr_t pgva, segva;
3169 	int pte, sme;
3170 #ifdef	HAVECACHE
3171 	int flush_by_page = 0;
3172 #endif
3173 
3174 	CHECK_SPL();
3175 
3176 #ifdef	DIAGNOSTIC
3177 	if (pmap->pm_ctxnum != get_context())
3178 		panic("pmap_protect_mmu: wrong context");
3179 #endif
3180 
3181 	segva = m68k_trunc_seg(sva);
3182 	sme = get_segmap(segva);
3183 
3184 #ifdef	DIAGNOSTIC
3185 	/* Make sure it is valid and known. */
3186 	if (sme == SEGINV)
3187 		panic("pmap_protect_mmu: SEGINV");
3188 	if (pmap->pm_segmap && (pmap->pm_segmap[VA_SEGNUM(segva)] != sme))
3189 		panic("pmap_protect_mmu: incorrect sme, va=0x%lx", segva);
3190 #endif
3191 
3192 	pmegp = pmeg_p(sme);
3193 	/* have pmeg, will travel */
3194 
3195 #ifdef	DIAGNOSTIC
3196 	/* Make sure we own the pmeg, right va, etc. */
3197 	if ((pmegp->pmeg_va != segva) ||
3198 		(pmegp->pmeg_owner != pmap) ||
3199 		(pmegp->pmeg_version != pmap->pm_version))
3200 	{
3201 		panic("pmap_protect_mmu: bad pmeg=%p", pmegp);
3202 	}
3203 	if (pmegp->pmeg_vpages < 0)
3204 		panic("pmap_protect_mmu: npages corrupted");
3205 	if (pmegp->pmeg_vpages == 0)
3206 		panic("pmap_protect_mmu: no valid pages?");
3207 #endif
3208 
3209 #ifdef	HAVECACHE
3210 	if (cache_size) {
3211 		/*
3212 		 * If the range to be removed is larger than the cache,
3213 		 * it will be cheaper to flush this segment entirely.
3214 		 */
3215 		if (cache_size < (eva - sva)) {
3216 			/* cheaper to flush whole segment */
3217 			cache_flush_segment(segva);
3218 		} else {
3219 			flush_by_page = 1;
3220 		}
3221 	}
3222 #endif
3223 
3224 	/* Remove write permission in the given range. */
3225 	for (pgva = sva; pgva < eva; pgva += NBPG) {
3226 		pte = get_pte(pgva);
3227 		if (pte & PG_VALID) {
3228 #ifdef	HAVECACHE
3229 			if (flush_by_page) {
3230 				cache_flush_page(pgva);
3231 				/* Get fresh mod/ref bits from write-back. */
3232 				pte = get_pte(pgva);
3233 			}
3234 #endif
3235 			if (IS_MAIN_MEM(pte)) {
3236 				save_modref_bits(pte);
3237 			}
3238 			pte &= ~(PG_WRITE | PG_MODREF);
3239 			set_pte(pgva, pte);
3240 		}
3241 	}
3242 }
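/*
 * Illustrative sketch of the cache-flush heuristic used above: when the
 * range being modified is larger than the whole cache, one segment
 * flush is cheaper than flushing page by page.
 */
#if 0	/* illustrative sketch, not referenced by the pmap code */
static int
ex_flush_by_page(int cache_size, unsigned long sva, unsigned long eva)
{
	if (cache_size == 0)
		return (0);			/* no cache, nothing to flush */
	return (cache_size >= (eva - sva));	/* small range: flush per page */
}
#endif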
3243 
3244 /*
3245  * Remove write permissions, all in one PMEG,
3246  * where it is not currently in any context.
3247  */
3248 void
3249 pmap_protect_noctx(pmap, sva, eva)
3250 	pmap_t pmap;
3251 	vaddr_t sva, eva;
3252 {
3253 	int old_ctx, pte, sme, segnum;
3254 	vaddr_t pgva, segva;
3255 
3256 #ifdef	DIAGNOSTIC
3257 	/* Kernel always in a context (actually, in context zero). */
3258 	if (pmap == kernel_pmap)
3259 		panic("pmap_protect_noctx: kernel_pmap");
3260 	if (pmap->pm_segmap == NULL)
3261 		panic("pmap_protect_noctx: null segmap");
3262 #endif
3263 
3264 	segva = m68k_trunc_seg(sva);
3265 	segnum = VA_SEGNUM(segva);
3266 	sme = pmap->pm_segmap[segnum];
3267 	if (sme == SEGINV)
3268 		return;
3269 
3270 	/*
3271 	 * Switch to the kernel context so we can access the PMEG
3272 	 * using the temporary segment.
3273 	 */
3274 	old_ctx = get_context();
3275 	set_context(KERNEL_CONTEXT);
3276 #ifdef	DIAGNOSTIC
3277 	if (temp_seg_inuse)
3278 		panic("pmap_protect_noctx: temp_seg_inuse");
3279 	temp_seg_inuse++;
3280 #endif
3281 	set_segmap(temp_seg_va, sme);
3282 	sva += (temp_seg_va - segva);
3283 	eva += (temp_seg_va - segva);
3284 
3285 	/* Remove write permission in the given range. */
3286 	for (pgva = sva; pgva < eva; pgva += NBPG) {
3287 		pte = get_pte(pgva);
3288 		if (pte & PG_VALID) {
3289 			/* No cache flush needed. */
3290 			if (IS_MAIN_MEM(pte)) {
3291 				save_modref_bits(pte);
3292 			}
3293 			pte &= ~(PG_WRITE | PG_MODREF);
3294 			set_pte(pgva, pte);
3295 		}
3296 	}
3297 
3298 	/*
3299 	 * Release the temporary segment, and
3300 	 * restore the previous context.
3301 	 */
3302 	set_segmap(temp_seg_va, SEGINV);
3303 #ifdef	DIAGNOSTIC
3304 	temp_seg_inuse--;
3305 #endif
3306 	set_context(old_ctx);
3307 }
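/*
 * Illustrative sketch of the temporary-segment trick used above and in
 * pmap_remove_noctx()/get_pte_pmeg()/set_pte_pmeg(): to reach the PTEs
 * of a PMEG that is not loaded in the current context, the PMEG is
 * loaded at a reserved kernel VA and the addresses are rebased by the
 * difference between that VA and the segment's own VA.
 */
#if 0	/* illustrative sketch, not referenced by the pmap code */
static unsigned long
ex_rebase_to_temp_seg(unsigned long va, unsigned long segva,
	unsigned long temp_seg_va)
{
	/* same offset within the segment, different segment base */
	return (va + (temp_seg_va - segva));
}
#endif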
3308 
3309 
3310 /*
3311  *	Remove the given range of addresses from the specified map.
3312  *
3313  *	It is assumed that the start and end are properly
3314  *	rounded to the page size.
3315  */
3316 void
3317 pmap_remove(pmap, sva, eva)
3318 	pmap_t pmap;
3319 	vaddr_t sva, eva;
3320 {
3321 	vaddr_t va, neva;
3322 	int segnum;
3323 
3324 #ifdef	PMAP_DEBUG
3325 	if ((pmap_debug & PMD_REMOVE) ||
3326 		((sva <= pmap_db_watchva && eva > pmap_db_watchva)))
3327 		printf("pmap_remove(%p, 0x%lx, 0x%lx)\n", pmap, sva, eva);
3328 #endif
3329 
3330 
3331 	KASSERT((pmap == kernel_pmap) ?
3332 			sva >= virtual_avail && eva < DVMA_MAP_END :
3333 			eva <= VM_MAXUSER_ADDRESS);
3334 	va = sva;
3335 	segnum = VA_SEGNUM(va);
3336 	while (va < eva) {
3337 		neva = m68k_trunc_seg(va) + NBSG;
3338 		if (neva > eva)
3339 			neva = eva;
3340 		if (pmap->pm_segmap[segnum] != SEGINV)
3341 			pmap_remove1(pmap, va, neva);
3342 		va = neva;
3343 		segnum++;
3344 	}
3345 }
3346 
3347 /*
3348  * Remove some mappings, all within one segment.
3349  */
3350 void
3351 pmap_remove1(pmap, sva, eva)
3352 	pmap_t pmap;
3353 	vaddr_t sva, eva;
3354 {
3355 	int old_ctx, s, sme;
3356 	boolean_t in_ctx;
3357 
3358 	s = splvm();
3359 
3360 #ifdef	DIAGNOSTIC
3361 	if (m68k_trunc_seg(sva) != m68k_trunc_seg(eva-1))
3362 		panic("pmap_remove1: bad range!");
3363 #endif
3364 
3365 	if (pmap == kernel_pmap) {
3366 		old_ctx = get_context();
3367 		set_context(KERNEL_CONTEXT);
3368 		sme = get_segmap(sva);
3369 		if (sme != SEGINV)
3370 			pmap_remove_mmu(pmap, sva, eva);
3371 		set_context(old_ctx);
3372 		goto out;
3373 	}
3374 	/* It is a user pmap. */
3375 
3376 	/* There is a PMEG, but maybe not active. */
3377 	old_ctx = INVALID_CONTEXT;
3378 	in_ctx = FALSE;
3379 	if (has_context(pmap)) {
3380 		/* Temporary context change. */
3381 		old_ctx = get_context();
3382 		set_context(pmap->pm_ctxnum);
3383 		sme = get_segmap(sva);
3384 		if (sme != SEGINV)
3385 			in_ctx = TRUE;
3386 	}
3387 
3388 	if (in_ctx == TRUE)
3389 		pmap_remove_mmu(pmap, sva, eva);
3390 	else
3391 		pmap_remove_noctx(pmap, sva, eva);
3392 
3393 	if (old_ctx != INVALID_CONTEXT) {
3394 		/* Restore previous context. */
3395 		set_context(old_ctx);
3396 	}
3397 
3398 out:
3399 	splx(s);
3400 }
3401 
3402 /*
3403  * Remove some mappings, all in one PMEG,
3404  * where that PMEG is currently in the MMU.
3405  * The current context is already correct.
3406  * If no PTEs remain valid in the PMEG, free it.
3407  */
3408 void
3409 pmap_remove_mmu(pmap, sva, eva)
3410 	pmap_t pmap;
3411 	vaddr_t sva, eva;
3412 {
3413 	pmeg_t pmegp;
3414 	vaddr_t pgva, segva;
3415 	int pte, sme;
3416 #ifdef	HAVECACHE
3417 	int flush_by_page = 0;
3418 #endif
3419 
3420 	CHECK_SPL();
3421 
3422 #ifdef	DIAGNOSTIC
3423 	if (pmap->pm_ctxnum != get_context())
3424 		panic("pmap_remove_mmu: wrong context");
3425 #endif
3426 
3427 	segva = m68k_trunc_seg(sva);
3428 	sme = get_segmap(segva);
3429 
3430 #ifdef	DIAGNOSTIC
3431 	/* Make sure it is valid and known. */
3432 	if (sme == SEGINV)
3433 		panic("pmap_remove_mmu: SEGINV");
3434 	if (pmap->pm_segmap && (pmap->pm_segmap[VA_SEGNUM(segva)] != sme))
3435 		panic("pmap_remove_mmu: incorrect sme, va=0x%lx", segva);
3436 #endif
3437 
3438 	pmegp = pmeg_p(sme);
3439 	/* have pmeg, will travel */
3440 
3441 #ifdef	DIAGNOSTIC
3442 	/* Make sure we own the pmeg, right va, etc. */
3443 	if ((pmegp->pmeg_va != segva) ||
3444 		(pmegp->pmeg_owner != pmap) ||
3445 		(pmegp->pmeg_version != pmap->pm_version))
3446 	{
3447 		panic("pmap_remove_mmu: bad pmeg=%p", pmegp);
3448 	}
3449 	if (pmegp->pmeg_vpages < 0)
3450 		panic("pmap_remove_mmu: npages corrupted");
3451 	if (pmegp->pmeg_vpages == 0)
3452 		panic("pmap_remove_mmu: no valid pages?");
3453 #endif
3454 
3455 #ifdef	HAVECACHE
3456 	if (cache_size) {
3457 		/*
3458 		 * If the range to be removed is larger than the cache,
3459 		 * it will be cheaper to flush this segment entirely.
3460 		 */
3461 		if (cache_size < (eva - sva)) {
3462 			/* cheaper to flush whole segment */
3463 			cache_flush_segment(segva);
3464 		} else {
3465 			flush_by_page = 1;
3466 		}
3467 	}
3468 #endif
3469 
3470 	/* Invalidate the PTEs in the given range. */
3471 	for (pgva = sva; pgva < eva; pgva += NBPG) {
3472 		pte = get_pte(pgva);
3473 		if (pte & PG_VALID) {
3474 #ifdef	HAVECACHE
3475 			if (flush_by_page) {
3476 				cache_flush_page(pgva);
3477 				/* Get fresh mod/ref bits from write-back. */
3478 				pte = get_pte(pgva);
3479 			}
3480 #endif
3481 			if (IS_MAIN_MEM(pte)) {
3482 				save_modref_bits(pte);
3483 				pv_unlink(pmap, pte, pgva);
3484 			}
3485 #ifdef	PMAP_DEBUG
3486 			if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
3487 				printf("pmap: set_pte pmap=%p va=0x%lx"
3488 					   " old=0x%x new=0x%x (rrmmu)\n",
3489 					   pmap, pgva, pte, PG_INVAL);
3490 			}
3491 #endif
3492 			set_pte(pgva, PG_INVAL);
3493 			KASSERT(pmegp->pmeg_vpages > 0);
3494 			pmegp->pmeg_vpages--;
3495 		}
3496 	}
3497 
3498 	KASSERT(pmegp->pmeg_vpages >= 0);
3499 	if (pmegp->pmeg_vpages == 0) {
3500 		/* We are done with this pmeg. */
3501 		if (is_pmeg_wired(pmegp)) {
3502 #ifdef	PMAP_DEBUG
3503 			if (pmap_debug & PMD_WIRING) {
3504 				db_printf("pmap: removing wired pmeg: %p\n", pmegp);
3505 				Debugger();
3506 			}
3507 #endif	/* PMAP_DEBUG */
3508 		}
3509 
3510 #ifdef	PMAP_DEBUG
3511 		if (pmap_debug & PMD_SEGMAP) {
3512 			printf("pmap: set_segmap ctx=%d v=0x%lx old=0x%x new=ff (rm)\n",
3513 			    pmap->pm_ctxnum, segva, pmegp->pmeg_index);
3514 		}
3515 		pmeg_verify_empty(segva);
3516 #endif
3517 
3518 		/* Remove it from the MMU.  (Cache flush was done above.) */
3519 		set_segmap(segva, SEGINV);
3526 		pmap->pm_segmap[VA_SEGNUM(segva)] = SEGINV;
3527 		/* Now, put it on the free list. */
3528 		pmeg_free(pmegp);
3529 	}
3530 }
3531 
3532 /*
3533  * Remove some mappings, all in one PMEG,
3534  * where it is not currently in any context.
3535  */
3536 void
3537 pmap_remove_noctx(pmap, sva, eva)
3538 	pmap_t pmap;
3539 	vaddr_t sva, eva;
3540 {
3541 	pmeg_t pmegp;
3542 	int old_ctx, pte, sme, segnum;
3543 	vaddr_t pgva, segva;
3544 
3545 	CHECK_SPL();
3546 
3547 #ifdef	DIAGNOSTIC
3548 	/* Kernel always in a context (actually, in context zero). */
3549 	if (pmap == kernel_pmap)
3550 		panic("pmap_remove_noctx: kernel_pmap");
3551 	if (pmap->pm_segmap == NULL)
3552 		panic("pmap_remove_noctx: null segmap");
3553 #endif
3554 
3555 	segva = m68k_trunc_seg(sva);
3556 	segnum = VA_SEGNUM(segva);
3557 	sme = pmap->pm_segmap[segnum];
3558 	if (sme == SEGINV)
3559 		return;
3560 	pmegp = pmeg_p(sme);
3561 
3562 	/*
3563 	 * Switch to the kernel context so we can access the PMEG
3564 	 * using the temporary segment.
3565 	 */
3566 	old_ctx = get_context();
3567 	set_context(KERNEL_CONTEXT);
3568 #ifdef	DIAGNOSTIC
3569 	if (temp_seg_inuse)
3570 		panic("pmap_remove_noctx: temp_seg_inuse");
3571 	temp_seg_inuse++;
3572 #endif
3573 	set_segmap(temp_seg_va, sme);
3574 	sva += (temp_seg_va - segva);
3575 	eva += (temp_seg_va - segva);
3576 
3577 	/* Invalidate the PTEs in the given range. */
3578 	for (pgva = sva; pgva < eva; pgva += NBPG) {
3579 		pte = get_pte(pgva);
3580 		if (pte & PG_VALID) {
3581 			/* No cache flush needed. */
3582 			if (IS_MAIN_MEM(pte)) {
3583 				save_modref_bits(pte);
3584 				pv_unlink(pmap, pte, pgva - (temp_seg_va - segva));
3585 			}
3586 #ifdef	PMAP_DEBUG
3587 			if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
3588 				printf("pmap: set_pte pmap=%p va=0x%lx"
3589 					   " old=0x%x new=0x%x (rrncx)\n",
3590 					   pmap, pgva, pte, PG_INVAL);
3591 			}
3592 #endif
3593 			set_pte(pgva, PG_INVAL);
3594 			KASSERT(pmegp->pmeg_vpages > 0);
3595 			pmegp->pmeg_vpages--;
3596 		}
3597 	}
3598 
3599 	/*
3600 	 * Release the temporary segment, and
3601 	 * restore the previous context.
3602 	 */
3603 	set_segmap(temp_seg_va, SEGINV);
3604 #ifdef	DIAGNOSTIC
3605 	temp_seg_inuse--;
3606 #endif
3607 	set_context(old_ctx);
3608 
3609 	KASSERT(pmegp->pmeg_vpages >= 0);
3610 	if (pmegp->pmeg_vpages == 0) {
3611 		/* We are done with this pmeg. */
3612 		if (is_pmeg_wired(pmegp)) {
3613 #ifdef	PMAP_DEBUG
3614 			if (pmap_debug & PMD_WIRING) {
3615 				db_printf("pmap: removing wired pmeg: %p\n", pmegp);
3616 				Debugger();
3617 			}
3618 #endif	/* PMAP_DEBUG */
3619 		}
3620 
3621 		pmap->pm_segmap[segnum] = SEGINV;
3622 		pmeg_free(pmegp);
3623 	}
3624 }
3625 
3626 
3627 /*
3628  * Count resident pages in this pmap.
3629  * See: kern_sysctl.c:pmap_resident_count
3630  */
3631 segsz_t
3632 pmap_resident_pages(pmap)
3633 	pmap_t pmap;
3634 {
3635 	int i, sme, pages;
3636 	pmeg_t pmeg;
3637 
3638 	if (pmap->pm_segmap == 0)
3639 		return (0);
3640 
3641 	pages = 0;
3642 	for (i = 0; i < NUSEG; i++) {
3643 		sme = pmap->pm_segmap[i];
3644 		if (sme != SEGINV) {
3645 			pmeg = pmeg_p(sme);
3646 			pages += pmeg->pmeg_vpages;
3647 		}
3648 	}
3649 	return (pages);
3650 }
3651 
3652 /*
3653  * Count wired pages in this pmap.
3654  * See vm_mmap.c:pmap_wired_count
3655  */
3656 segsz_t
3657 pmap_wired_pages(pmap)
3658 	pmap_t pmap;
3659 {
3660 	int i, mask, sme, pages;
3661 	pmeg_t pmeg;
3662 
3663 	if (pmap->pm_segmap == 0)
3664 		return (0);
3665 
3666 	pages = 0;
3667 	for (i = 0; i < NUSEG; i++) {
3668 		sme = pmap->pm_segmap[i];
3669 		if (sme != SEGINV) {
3670 			pmeg = pmeg_p(sme);
3671 			mask = 0x8000;
3672 			do {
3673 				if (pmeg->pmeg_wired & mask)
3674 					pages++;
3675 				mask = (mask >> 1);
3676 			} while (mask);
3677 		}
3678 	}
3679 	return (pages);
3680 }
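/*
 * Illustrative sketch of the bit counting done in pmap_wired_pages()
 * above: the loop over the mask is simply a population count of the
 * PMEG's 16-bit wired mask, which could also be written as below.
 */
#if 0	/* illustrative sketch, not referenced by the pmap code */
static int
ex_count_wired(unsigned short wired)
{
	int n;

	for (n = 0; wired != 0; n++)
		wired &= wired - 1;	/* clear the lowest set bit */
	return (n);
}
#endif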
3681 
3682 
3683 /*
3684  *	pmap_copy_page copies the specified (machine independent)
3685  *	page by mapping the page into virtual memory and using
3686  *	copypage() to copy the page, one machine dependent page at a
3687  *	time.
3688  */
3689 void
3690 pmap_copy_page(src, dst)
3691 	paddr_t	src, dst;
3692 {
3693 	int pte;
3694 	int s;
3695 	int saved_ctx;
3696 
3697 	s = splvm();
3698 
3699 #ifdef	PMAP_DEBUG
3700 	if (pmap_debug & PMD_COW)
3701 		printf("pmap_copy_page: 0x%lx -> 0x%lx\n", src, dst);
3702 #endif
3703 
3704 	/*
3705 	 * Temporarily switch to the kernel context to use the
3706 	 * tmp_vpages.
3707 	 */
3708 	saved_ctx = get_context();
3709 	set_context(KERNEL_CONTEXT);
3710 #ifdef DIAGNOSTIC
3711 	if (tmp_vpages_inuse)
3712 		panic("pmap_copy_page: vpages inuse");
3713 	tmp_vpages_inuse++;
3714 #endif
3715 
3716 	/* PG_PERM is short for (PG_VALID|PG_WRITE|PG_SYSTEM|PG_NC) */
3717 	/* All mappings to tmp_vpages are non-cached, so no flush. */
3718 	pte = PG_PERM | PA_PGNUM(src);
3719 	set_pte(tmp_vpages[0], pte);
3720 	pte = PG_PERM | PA_PGNUM(dst);
3721 	set_pte(tmp_vpages[1], pte);
3722 	copypage((char *) tmp_vpages[0], (char *) tmp_vpages[1]);
3723 	set_pte(tmp_vpages[0], PG_INVAL);
3724 	set_pte(tmp_vpages[1], PG_INVAL);
3725 
3726 #ifdef DIAGNOSTIC
3727 	tmp_vpages_inuse--;
3728 #endif
3729 	set_context(saved_ctx);
3730 
3731 	splx(s);
3732 }
3733 
3734 /*
3735  *	pmap_zero_page zeros the specified (machine independent)
3736  *	page by mapping the page into virtual memory and using
3737  *	zeropage() to clear its contents, one machine dependent page
3738  *	at a time.
3739  */
3740 void
3741 pmap_zero_page(pa)
3742 	paddr_t	pa;
3743 {
3744 	int pte;
3745 	int s;
3746 	int saved_ctx;
3747 
3748 	s = splvm();
3749 
3750 #ifdef	PMAP_DEBUG
3751 	if (pmap_debug & PMD_COW)
3752 		printf("pmap_zero_page: 0x%lx\n", pa);
3753 #endif
3754 
3755 	/*
3756 	 * Temporarily switch to the kernel context to use the
3757 	 * tmp_vpages.
3758 	 */
3759 	saved_ctx = get_context();
3760 	set_context(KERNEL_CONTEXT);
3761 #ifdef DIAGNOSTIC
3762 	if (tmp_vpages_inuse)
3763 		panic("pmap_zero_page: vpages inuse");
3764 	tmp_vpages_inuse++;
3765 #endif
3766 
3767 	/* PG_PERM is short for (PG_VALID|PG_WRITE|PG_SYSTEM|PG_NC) */
3768 	/* All mappings to tmp_vpages are non-cached, so no flush. */
3769 	pte = PG_PERM | PA_PGNUM(pa);
3770 	set_pte(tmp_vpages[0], pte);
3771 	zeropage((char *) tmp_vpages[0]);
3772 	set_pte(tmp_vpages[0], PG_INVAL);
3773 
3774 #ifdef DIAGNOSTIC
3775 	tmp_vpages_inuse--;
3776 #endif
3777 	set_context(saved_ctx);
3778 
3779 	splx(s);
3780 }
3781 
3782 /*
3783  *	Routine:	pmap_collect
3784  *	Function:
3785  *		Garbage collects the physical map system for
3786  *		pages which are no longer used.
3787  *		Success need not be guaranteed -- that is, there
3788  *		may well be pages which are not referenced, but
3789  *		others may be collected.
3790  *	Usage:
3791  *		Called by the pageout daemon when pages are scarce.
3792  */
3793 void
3794 pmap_collect(pmap)
3795 	pmap_t pmap;
3796 {
3797 }
3798 
3799 /*
3800  * Find first virtual address >= *va that is
3801  * least likely to cause cache aliases.
3802  * (This will just seg-align mappings.)
3803  */
3804 void
3805 pmap_prefer(fo, va)
3806 	vaddr_t fo;
3807 	vaddr_t *va;
3808 {
3809 	long d;
3810 
3811 	d = fo - *va;
3812 	d &= SEGOFSET;
3813 	*va += d;
3814 }
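/*
 * Worked sketch of pmap_prefer() above: *va is advanced so that it has
 * the same offset within a segment as fo, which keeps file offsets and
 * mapping addresses segment-aligned relative to each other.  The
 * segment-offset mask is an assumption for this example only.
 */
#if 0	/* illustrative sketch, not referenced by the pmap code */
#define EX_SEGOFSET	0x7fffUL	/* assumed (segment size - 1) */

static unsigned long
ex_prefer(unsigned long fo, unsigned long va)
{
	unsigned long d = (fo - va) & EX_SEGOFSET;

	/* e.g. fo = 0x1800, va = 0x20000  ->  0x21800 */
	return (va + d);
}
#endif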
3815 
3816 /*
3817  * Fill in the sun2-specific part of the kernel core header
3818  * for dumpsys().  (See machdep.c for the rest.)
3819  */
3820 void
3821 pmap_kcore_hdr(sh)
3822 	struct sun2_kcore_hdr *sh;
3823 {
3824 	vaddr_t va;
3825 	u_char *cp, *ep;
3826 	int saved_ctx;
3827 
3828 	sh->segshift = SEGSHIFT;
3829 	sh->pg_frame = PG_FRAME;
3830 	sh->pg_valid = PG_VALID;
3831 
3832 	/* Copy the kernel segmap (256 bytes). */
3833 	va = KERNBASE;
3834 	cp = sh->ksegmap;
3835 	ep = cp + sizeof(sh->ksegmap);
3836 	saved_ctx = get_context();
3837 	set_context(KERNEL_CONTEXT);
3838 	do {
3839 		*cp = get_segmap(va);
3840 		va += NBSG;
3841 		cp++;
3842 	} while (cp < ep);
3843 	set_context(saved_ctx);
3844 }
3845 
3846 /*
3847  * Copy the pagemap RAM into the passed buffer (one page)
3848  * starting at OFF in the pagemap RAM.
3849  */
3850 void
3851 pmap_get_pagemap(pt, off)
3852 	int *pt;
3853 	int off;
3854 {
3855 	vaddr_t va, va_end;
3856 	int sme, sme_end;	/* SegMap Entry numbers */
3857 	int saved_ctx;
3858 
3859 	sme = (off / (NPAGSEG * sizeof(*pt)));	/* PMEG to start on */
3860 	sme_end = sme + (NBPG / (NPAGSEG * sizeof(*pt))); /* where to stop */
3861 	va_end = temp_seg_va + NBSG;
3862 
3863 	saved_ctx = get_context();
3864 	set_context(KERNEL_CONTEXT);
3865 	do {
3866 		set_segmap(temp_seg_va, sme);
3867 		va = temp_seg_va;
3868 		do {
3869 			*pt++ = get_pte(va);
3870 			va += NBPG;
3871 		} while (va < va_end);
3872 		sme++;
3873 	} while (sme < sme_end);
3874 	set_segmap(temp_seg_va, SEGINV);
3875 	set_context(saved_ctx);
3876 }
3877 
3878 
3879 /*
3880  * Helper functions for changing unloaded PMEGs
3881  */
3882 
3883 static int
3884 get_pte_pmeg(int pmeg_num, int page_num)
3885 {
3886 	vaddr_t va;
3887 	int pte;
3888 	int saved_ctx;
3889 
3890 	CHECK_SPL();
3891 	saved_ctx = get_context();
3892 	set_context(KERNEL_CONTEXT);
3893 #ifdef DIAGNOSTIC
3894 	if (temp_seg_inuse)
3895 		panic("get_pte_pmeg: temp_seg_inuse");
3896 	temp_seg_inuse++;
3897 #endif
3898 
3899 	va = temp_seg_va;
3900 	set_segmap(temp_seg_va, pmeg_num);
3901 	va += NBPG*page_num;
3902 	pte = get_pte(va);
3903 	set_segmap(temp_seg_va, SEGINV);
3904 
3905 #ifdef DIAGNOSTIC
3906 	temp_seg_inuse--;
3907 #endif
3908 	set_context(saved_ctx);
3909 	return pte;
3910 }
3911 
3912 static void
3913 set_pte_pmeg(int pmeg_num, int page_num, int pte)
3914 {
3915 	vaddr_t va;
3916 	int saved_ctx;
3917 
3918 	CHECK_SPL();
3919 	saved_ctx = get_context();
3920 	set_context(KERNEL_CONTEXT);
3921 #ifdef DIAGNOSTIC
3922 	if (temp_seg_inuse)
3923 		panic("set_pte_pmeg: temp_seg_inuse");
3924 	temp_seg_inuse++;
3925 #endif
3926 
3927 	/* We never access data in temp_seg_va so no need to flush. */
3928 	va = temp_seg_va;
3929 	set_segmap(temp_seg_va, pmeg_num);
3930 	va += NBPG*page_num;
3931 	set_pte(va, pte);
3932 	set_segmap(temp_seg_va, SEGINV);
3933 
3934 #ifdef DIAGNOSTIC
3935 	temp_seg_inuse--;
3936 #endif
3937 	set_context(saved_ctx);
3938 }
3939 
3940 /*
3941  *	Routine:        pmap_procwr
3942  *
3943  *	Function:
3944  *		Synchronize caches corresponding to [addr, addr+len) in p.
3945  */
3946 void
3947 pmap_procwr(p, va, len)
3948 	struct proc	*p;
3949 	vaddr_t		va;
3950 	size_t		len;
3951 {
3952 }
3953 
3954 
3955 #ifdef	PMAP_DEBUG
3956 /* Things to call from the debugger. */
3957 
3958 void
3959 pmap_print(pmap)
3960 	pmap_t pmap;
3961 {
3962 	db_printf(" pm_ctxnum=%d\n", pmap->pm_ctxnum);
3963 	db_printf(" pm_version=0x%x\n", pmap->pm_version);
3964 	db_printf(" pm_segmap=%p\n", pmap->pm_segmap);
3965 }
3966 
3967 void
3968 pmeg_print(pmegp)
3969 	pmeg_t pmegp;
3970 {
3971 	db_printf("link_next=%p  link_prev=%p\n",
3972 	    pmegp->pmeg_link.tqe_next,
3973 	    pmegp->pmeg_link.tqe_prev);
3974 	db_printf("index=0x%x owner=%p own_vers=0x%x\n",
3975 	    pmegp->pmeg_index, pmegp->pmeg_owner, pmegp->pmeg_version);
3976 	db_printf("va=0x%lx wired=0x%x reserved=0x%x vpgs=0x%x qstate=0x%x\n",
3977 	    pmegp->pmeg_va, pmegp->pmeg_wired,
3978 	    pmegp->pmeg_reserved, pmegp->pmeg_vpages,
3979 	    pmegp->pmeg_qstate);
3980 }
3981 
3982 void
3983 pv_print(pa)
3984 	paddr_t pa;
3985 {
3986 	pv_entry_t pv;
3987 	int idx;
3988 
3989 	idx = PA_PGNUM(pa);
3990 	if (idx >= physmem) {
3991 		db_printf("bad address\n");
3992 		return;
3993 	}
3994 	db_printf("pa=0x%lx, flags=0x%x\n",
3995 			  pa, pv_flags_tbl[idx]);
3996 
3997 	pv = pv_head_tbl[idx];
3998 	while (pv) {
3999 		db_printf(" pv_entry %p pmap %p va 0x%lx next %p\n",
4000 			   pv, pv->pv_pmap, pv->pv_va, pv->pv_next);
4001 		pv = pv->pv_next;
4002 	}
4003 }
4004 #endif	/* PMAP_DEBUG */
4005 
4006 /*
4007  * Local Variables:
4008  * tab-width: 4
4009  * End:
4010  */
4011