xref: /netbsd/sys/arch/sun3/sun3/pmap.c (revision c4a72b64)
1 /*	$NetBSD: pmap.c,v 1.138 2002/10/20 02:37:36 chs Exp $	*/
2 
3 /*-
4  * Copyright (c) 1996 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Adam Glass and Gordon W. Ross.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *        This product includes software developed by the NetBSD
21  *        Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * Some notes:
41  *
42  * sun3s have contexts (8).  In this pmap design, the kernel is mapped
43  * into all contexts.  Processes take up a known portion of a context's
44  * address space, and compete for the available contexts on an LRU basis.
45  *
46  * sun3s also have this evil "PMEG" crapola.  Essentially each "context"'s
47  * address space is defined by the 2048 one-byte entries in the segment map.
48  * Each of these 1-byte entries points to a "Page Map Entry Group" (PMEG)
49  * which contains the mappings for that virtual segment.  (This strange
50  * terminology was invented by Sun and is preserved here for consistency.)
51  * Each PMEG maps a segment 128KB in length, with 16 pages of 8KB each.
52  *
53  * As you might guess, these PMEGs are in short supply and heavy demand.
54  * PMEGs allocated to the kernel are "static" in the sense that they can't
55  * be stolen from it.  PMEGs allocated to a particular segment of a
56  * pmap's virtual space will be fought over by the other pmaps.
57  */
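
/*
 * For scale (using the sizes described above): each PMEG holds
 * 16 PTEs covering 16 * 8KB = 128KB, and each context has 2048
 * one-byte segmap entries, so a context can name 2048 * 128KB =
 * 256MB of virtual space.  The one-byte segmap entry also limits
 * the machine to at most 256 PMEGs, one of which (SEGINV) is
 * reserved as the "invalid" marker.
 */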
58 
59 /*
60  * Cache management:
61  * All sun3 cache implementations are write-back.
62  * Flushes must be done before removing translations
63  * from the MMU because the cache uses the MMU.
64  */
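
/*
 * Concretely: below, cache_flush_page(), _segment() or _context()
 * is called (when cache_size != 0) before the set_pte()/set_segmap()
 * that removes or rewrites the translation, not after it.
 */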
65 
66 /*
67  * wanted attributes:
68  *       pmegs that aren't needed by a pmap remain in the MMU.
69  *       quick context switches between pmaps
70  *       kernel is in all contexts
71  */
72 
73 /*
74  * Project1:  Use a "null" context for processes that have not
75  * touched any user-space address recently.  This is efficient
76  * for things that stay in the kernel for a while, waking up
77  * to handle some I/O and then going back to sleep (e.g. nfsd).
78  * If and when such a process returns to user-mode, it will
79  * fault and be given a real context at that time.
80  *
81  * This also lets context switch be fast, because all we need
82  * to do there for the MMU is slam the context register.
83  *
84  * Project2:  Use a private pool of PV elements.  This pool can be
85  * fixed size because the total mapped virtual space supported by
86  * the MMU H/W (and this pmap) is fixed for all time.
87  */
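
/*
 * The fixed bound behind Project2: every mapping lives in a PMEG
 * slot and there are only NPMEG * NPAGSEG such slots, so a pool
 * of that many PV elements can never run dry.  pv_init() below
 * sizes its private pool exactly that way.
 */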
88 
89 #include "opt_ddb.h"
90 
91 #include <sys/param.h>
92 #include <sys/systm.h>
93 #include <sys/proc.h>
94 #include <sys/malloc.h>
95 #include <sys/pool.h>
96 #include <sys/user.h>
97 #include <sys/queue.h>
98 #include <sys/kcore.h>
99 
100 #include <uvm/uvm.h>
101 
102 #include <machine/cpu.h>
103 #include <machine/dvma.h>
104 #include <machine/idprom.h>
105 #include <machine/kcore.h>
106 #include <machine/mon.h>
107 #include <machine/pmap.h>
108 #include <machine/pte.h>
109 #include <machine/vmparam.h>
110 #include <m68k/cacheops.h>
111 
112 #include <sun3/sun3/cache.h>
113 #include <sun3/sun3/control.h>
114 #include <sun3/sun3/fc.h>
115 #include <sun3/sun3/machdep.h>
116 #include <sun3/sun3/obmem.h>
117 
118 #ifdef DDB
119 #include <ddb/db_output.h>
120 #else
121 #define db_printf printf
122 #endif
123 
124 /* Verify this correspondence between definitions. */
125 #if	(PMAP_OBIO << PG_MOD_SHIFT) != PGT_OBIO
126 #error	"PMAP_XXX definitions don't match pte.h!"
127 #endif
128 
129 /* Type bits in a "pseudo" physical address. (XXX: pmap.h?) */
130 #define PMAP_TYPE	PMAP_VME32
131 
132 /*
133  * Local convenience macros
134  */
135 
136 #define DVMA_MAP_END	(DVMA_MAP_BASE + DVMA_MAP_AVAIL)
137 
138 /* User segments from 0 to KERNBASE */
139 #define	NUSEG	(KERNBASE / NBSG)
140 /* The remainder are kernel segments. */
141 #define	NKSEG	(NSEGMAP - NUSEG)
142 
143 #define VA_SEGNUM(x)	((u_int)(x) >> SEGSHIFT)
144 
145 /*
146  * Only "main memory" pages are registered in the pv_lists.
147  * This macro is used to determine if a given pte refers to
148  * "main memory" or not.  One slight hack here deserves more
149  * explanation:  The Sun frame buffers all appear as PG_OBMEM
150  * devices but way up near the end of the address space.
151  * We do not want to consider these as "main memory" so the
152  * macro below treats the high bits of the PFN as type bits.
153  *
154  * Note that on the 3/60 only 16 bits of PFN are stored in the
155  * MMU and the top 3 bits read back as zero.  This means a
156  * translation entered into the mmu for physical address
157  * 0xFF000000 will look like 0x1F000000 after one reads back
158  * the pte and converts the PFN to a physical address.
159  */
160 #define MEM_BITS	(PG_TYPE | PA_PGNUM(0xF8000000))
161 #define	IS_MAIN_MEM(pte) (((pte) & MEM_BITS) == 0)
162 
163 /* Does this (pseudo) PA represent device space? */
164 #define PA_DEV_MASK   (0xF8000000 | PMAP_TYPE)
165 #define PA_IS_DEV(pa) ((pa) & PA_DEV_MASK)
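
/*
 * Example: the frame-buffer PTE described above (physical
 * 0xFF000000, reading back as 0x1F000000) still has PFN bits
 * inside PA_PGNUM(0xF8000000), so IS_MAIN_MEM() rejects it even
 * though its type bits say "obmem".  An ordinary RAM PTE has a
 * zero type field and a PFN below that cutoff, so it passes.
 * PA_IS_DEV() applies the same idea to a (pseudo) PA before it
 * has been turned into a PTE.
 */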
166 
167 /*
168  * Is there a Virtually Addressed Cache (VAC) alias problem
169  * if one page is mapped at both a1 and a2?
170  */
171 #define	BADALIAS(a1, a2)	(((int)(a1) ^ (int)(a2)) & SEGOFSET)
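
/*
 * With the 128KB segments used here, SEGOFSET is 0x1FFFF, so two
 * mappings of one physical page are safe only when they lie at
 * the same offset within their segments.  For example, VAs
 * 0x20000 and 0x40000 do not conflict, but 0x20000 and 0x21000
 * do; pv_link() then marks the page non-cached (PG_NC).
 */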
172 
173 
174 /*
175  * Debugging support.
176  */
177 #define	PMD_ENTER	1
178 #define	PMD_LINK	2
179 #define	PMD_PROTECT	4
180 #define	PMD_SWITCH	8
181 #define PMD_COW		0x10
182 #define PMD_MODBIT	0x20
183 #define PMD_REFBIT	0x40
184 #define PMD_WIRING	0x80
185 #define PMD_CONTEXT	0x100
186 #define PMD_CREATE	0x200
187 #define PMD_SEGMAP	0x400
188 #define PMD_SETPTE	0x800
189 #define PMD_FAULT	0x1000
190 #define PMD_KMAP	0x2000
191 
192 #define	PMD_REMOVE	PMD_ENTER
193 #define	PMD_UNLINK	PMD_LINK
194 
195 #ifdef	PMAP_DEBUG
196 int pmap_debug = 0;
197 int pmap_db_watchva = -1;
198 int pmap_db_watchpmeg = -1;
199 #endif	/* PMAP_DEBUG */
200 
201 /*
202  * Miscellaneous variables.
203  *
204  * For simplicity, this interface retains the variables
205  * that were used in the old interface (without NONCONTIG).
206  * These are set in pmap_bootstrap() and used in
207  * pmap_next_page().
208  */
209 vaddr_t virtual_avail, virtual_end;
210 paddr_t avail_start, avail_end;
211 #define	managed(pa)	(((pa) >= avail_start) && ((pa) < avail_end))
212 
213 /* used to skip the Sun3/50 video RAM */
214 static vaddr_t hole_start, hole_size;
215 
216 /* This is for pmap_next_page() */
217 static paddr_t avail_next;
218 
219 /* This is where we map a PMEG without a context. */
220 static vaddr_t temp_seg_va;
221 
222 /*
223  * Location to store virtual addresses
224  * to be used in copy/zero operations.
225  */
226 vaddr_t tmp_vpages[2] = {
227 	SUN3_MONSHORTSEG,
228 	SUN3_MONSHORTSEG + NBPG };
229 int tmp_vpages_inuse;
230 
231 static int pmap_version = 1;
232 struct pmap kernel_pmap_store;
233 #define kernel_pmap (&kernel_pmap_store)
234 static u_char kernel_segmap[NSEGMAP];
235 
236 /* memory pool for pmap structures */
237 struct pool	pmap_pmap_pool;
238 
239 /* statistics... */
240 struct pmap_stats {
241 	int	ps_enter_firstpv;	/* pv heads entered */
242 	int	ps_enter_secondpv;	/* pv nonheads entered */
243 	int	ps_unlink_pvfirst;	/* of pv_unlinks on head */
244 	int	ps_unlink_pvsearch;	/* of pv_unlink searches */
245 	int	ps_pmeg_faultin;	/* pmegs reloaded */
246 	int	ps_changeprots;		/* of calls to changeprot */
247 	int	ps_changewire;		/* useless wiring changes */
248 	int	ps_npg_prot_all;	/* of active pages protected */
249 	int	ps_npg_prot_actual;	/* pages actually affected */
250 	int	ps_vac_uncached;	/* non-cached due to bad alias */
251 	int	ps_vac_recached;	/* re-cached when bad alias gone */
252 } pmap_stats;
253 
254 #define pmap_lock(pmap) simple_lock(&pmap->pm_lock)
255 #define pmap_unlock(pmap) simple_unlock(&pmap->pm_lock)
256 #define pmap_add_ref(pmap) ++pmap->pm_refcount
257 #define pmap_del_ref(pmap) --pmap->pm_refcount
258 #define pmap_refcount(pmap) pmap->pm_refcount
259 
260 #ifdef	PMAP_DEBUG
261 #define	CHECK_SPL() do { \
262 	if ((getsr() & PSL_IPL) < PSL_IPL4) \
263 		panic("pmap: bad spl, line %d", __LINE__); \
264 } while (0)
265 #else	/* PMAP_DEBUG */
266 #define	CHECK_SPL() (void)0
267 #endif	/* PMAP_DEBUG */
268 
269 
270 /*
271  * PV support.
272  * (i.e. Find all virtual mappings of a physical page.)
273  */
274 
275 int pv_initialized = 0;
276 
277 /* One of these for each mapped virtual page. */
278 struct pv_entry {
279 	struct pv_entry *pv_next;
280 	pmap_t	       pv_pmap;
281 	vaddr_t        pv_va;
282 };
283 typedef struct pv_entry *pv_entry_t;
284 
285 /* Table of PV list heads (per physical page). */
286 static struct pv_entry **pv_head_tbl;
287 
288 /* Free list of PV entries. */
289 static struct pv_entry *pv_free_list;
290 
291 /* Table of flags (per physical page). */
292 static u_char *pv_flags_tbl;
293 
294 /* These are as in the MMU but shifted by PV_SHIFT. */
295 #define PV_SHIFT	24
296 #define PV_VALID  0x80
297 #define PV_WRITE  0x40
298 #define PV_SYSTEM 0x20
299 #define PV_NC     0x10
300 #define PV_PERM   0xF0
301 #define PV_TYPE   0x0C
302 #define PV_REF    0x02
303 #define PV_MOD    0x01
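
/*
 * In other words, each PV_* value is the matching PG_* PTE bit
 * shifted right by PV_SHIFT; e.g. the PTE modified bit
 * (0x01000000 in pte.h) becomes PV_MOD (0x01).  save_modref_bits()
 * and pv_syncflags() depend on this when they fold PG_MODREF
 * into the per-page flag byte.
 */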
304 
305 
306 /*
307  * context structures, and queues
308  */
309 
310 struct context_state {
311 	TAILQ_ENTRY(context_state) context_link;
312 	int            context_num;
313 	struct pmap   *context_upmap;
314 };
315 typedef struct context_state *context_t;
316 
317 #define INVALID_CONTEXT -1	/* impossible value */
318 #define EMPTY_CONTEXT 0
319 #define FIRST_CONTEXT 1
320 #define	has_context(pmap)	((pmap)->pm_ctxnum != EMPTY_CONTEXT)
321 
322 TAILQ_HEAD(context_tailq, context_state)
323 	context_free_queue, context_active_queue;
324 
325 static struct context_state context_array[NCONTEXT];
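
/*
 * Context policy in brief: the MMU has NCONTEXT (8) contexts.
 * Context zero is EMPTY_CONTEXT, permanently owned by the kernel
 * and used as the "null" context described in Project1 above, so
 * only FIRST_CONTEXT..NCONTEXT-1 are handed to user pmaps.  When
 * the free queue is empty, context_allocate() steals the head of
 * context_active_queue, giving roughly LRU replacement.
 */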
326 
327 
328 /*
329  * PMEG structures, queues, and macros
330  */
331 #define PMEGQ_FREE     0
332 #define PMEGQ_INACTIVE 1
333 #define PMEGQ_ACTIVE   2
334 #define PMEGQ_KERNEL   3
335 #define PMEGQ_NONE     4
336 
337 struct pmeg_state {
338 	TAILQ_ENTRY(pmeg_state) pmeg_link;
339 	int            pmeg_index;
340 	pmap_t         pmeg_owner;
341 	int            pmeg_version;
342 	vaddr_t        pmeg_va;
343 	int            pmeg_wired;
344 	int            pmeg_reserved;
345 	int            pmeg_vpages;
346 	int            pmeg_qstate;
347 };
348 
349 typedef struct pmeg_state *pmeg_t;
350 
351 #define PMEG_INVAL (NPMEG-1)
352 #define PMEG_NULL (pmeg_t) NULL
353 
354 /* XXX - Replace pmeg_kernel_queue with pmeg_wired_queue ? */
355 TAILQ_HEAD(pmeg_tailq, pmeg_state)
356 	pmeg_free_queue, pmeg_inactive_queue,
357 	pmeg_active_queue, pmeg_kernel_queue;
358 
359 static struct pmeg_state pmeg_array[NPMEG];
360 
361 
362 /*
363  * prototypes
364  */
365 static int get_pte_pmeg __P((int, int));
366 static void set_pte_pmeg __P((int, int, int));
367 
368 static void context_allocate __P((pmap_t));
369 static void context_free __P((pmap_t));
370 static void context_init __P((void));
371 
372 static void pmeg_init __P((void));
373 static void pmeg_reserve __P((int));
374 
375 static pmeg_t pmeg_allocate __P((pmap_t, vaddr_t));
376 static void pmeg_mon_init __P((vaddr_t, vaddr_t, int));
377 static void pmeg_release __P((pmeg_t));
378 static void pmeg_free __P((pmeg_t));
379 static pmeg_t pmeg_cache __P((pmap_t, vaddr_t));
380 static void pmeg_set_wiring __P((pmeg_t, vaddr_t, int));
381 
382 static int  pv_link   __P((pmap_t, int, vaddr_t));
383 static void pv_unlink __P((pmap_t, int, vaddr_t));
384 static void pv_remove_all __P((paddr_t));
385 static void pv_changepte __P((paddr_t, int, int));
386 static u_int pv_syncflags __P((pv_entry_t));
387 static void pv_init __P((void));
388 
389 static void pmeg_clean __P((pmeg_t));
390 static void pmeg_clean_free __P((void));
391 
392 static void pmap_common_init __P((pmap_t));
393 static void pmap_kernel_init __P((pmap_t));
394 static void pmap_user_init __P((pmap_t));
395 static void pmap_page_upload __P((void));
396 
397 static void pmap_enter_kernel __P((vaddr_t, int, boolean_t));
398 static void pmap_enter_user __P((pmap_t, vaddr_t, int, boolean_t));
399 
400 static void pmap_protect1 __P((pmap_t, vaddr_t, vaddr_t));
401 static void pmap_protect_mmu __P((pmap_t, vaddr_t, vaddr_t));
402 static void pmap_protect_noctx __P((pmap_t, vaddr_t, vaddr_t));
403 
404 static void pmap_remove1 __P((pmap_t, vaddr_t, vaddr_t));
405 static void pmap_remove_mmu __P((pmap_t, vaddr_t, vaddr_t));
406 static void pmap_remove_noctx __P((pmap_t, vaddr_t, vaddr_t));
407 
408 static int  pmap_fault_reload __P((struct pmap *, vaddr_t, int));
409 
410 /* Called only from locore.s and pmap.c */
411 void	_pmap_switch __P((pmap_t));
412 
413 #ifdef	PMAP_DEBUG
414 void pmap_print __P((pmap_t));
415 void pv_print __P((struct vm_page *));
416 void pmeg_print __P((pmeg_t));
417 static void pmeg_verify_empty __P((vaddr_t));
418 #endif	/* PMAP_DEBUG */
419 void pmap_pinit __P((pmap_t));
420 void pmap_release __P((pmap_t));
421 
422 /*
423  * Various in-line helper functions.
424  */
425 
426 static inline pmap_t
427 current_pmap __P((void))
428 {
429 	struct proc *p;
430 	struct vmspace *vm;
431 	struct vm_map *map;
432 	pmap_t	pmap;
433 
434 	p = curproc;	/* XXX */
435 	if (p == NULL)
436 		pmap = kernel_pmap;
437 	else {
438 		vm = p->p_vmspace;
439 		map = &vm->vm_map;
440 		pmap = vm_map_pmap(map);
441 	}
442 
443 	return (pmap);
444 }
445 
446 static inline struct pv_entry **
447 pa_to_pvhead(paddr_t pa)
448 {
449 	int idx;
450 
451 	idx = PA_PGNUM(pa);
452 #ifdef	DIAGNOSTIC
453 	if (PA_IS_DEV(pa) || (idx >= physmem))
454 		panic("pmap:pa_to_pvhead: bad pa=0x%lx", pa);
455 #endif
456 	return (&pv_head_tbl[idx]);
457 }
458 
459 static inline u_char *
460 pa_to_pvflags(paddr_t pa)
461 {
462 	int idx;
463 
464 	idx = PA_PGNUM(pa);
465 #ifdef	DIAGNOSTIC
466 	if (PA_IS_DEV(pa) || (idx >= physmem))
467 		panic("pmap:pa_to_pvflags: bad pa=0x%lx", pa);
468 #endif
469 	return (&pv_flags_tbl[idx]);
470 }
471 
472 /*
473  * Save the MOD/REF bits from the given PTE using its PA
474  */
475 static inline void
476 save_modref_bits(int pte)
477 {
478 	u_char *pv_flags;
479 
480 	pv_flags = pa_to_pvflags(PG_PA(pte));
481 	*pv_flags |= ((pte & PG_MODREF) >> PV_SHIFT);
482 }
483 
484 static inline pmeg_t
485 pmeg_p(int sme)
486 {
487 #ifdef	DIAGNOSTIC
488 	if (sme < 0 || sme >= SEGINV)
489 		panic("pmeg_p: bad sme");
490 #endif
491 	return &pmeg_array[sme];
492 }
493 
494 #define is_pmeg_wired(pmegp) (pmegp->pmeg_wired != 0)
495 
496 static void
497 pmeg_set_wiring(pmegp, va, flag)
498 	pmeg_t pmegp;
499 	vaddr_t va;
500 	int flag;
501 {
502 	int idx, mask;
503 
504 	idx = VA_PTE_NUM(va);
505 	mask = 1 << idx;
506 
507 	if (flag)
508 		pmegp->pmeg_wired |= mask;
509 	else
510 		pmegp->pmeg_wired &= ~mask;
511 }
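
/*
 * Each bit of pmeg_wired corresponds to one of the 16 PTE slots
 * in the PMEG; e.g. wiring the page at segment offset 0x4000
 * (VA_PTE_NUM() == 2 with 8KB pages) sets bit 2, and
 * is_pmeg_wired() reports the PMEG wired while any bit is set.
 */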
512 
513 /****************************************************************
514  * Context management functions.
515  */
516 
517 /* part of pmap_bootstrap */
518 static void
519 context_init()
520 {
521 	int i;
522 
523 	TAILQ_INIT(&context_free_queue);
524 	TAILQ_INIT(&context_active_queue);
525 
526 	/* Leave EMPTY_CONTEXT out of the free list. */
527 	context_array[0].context_upmap = kernel_pmap;
528 
529 	for (i = 1; i < NCONTEXT; i++) {
530 		context_array[i].context_num = i;
531 		context_array[i].context_upmap = NULL;
532 		TAILQ_INSERT_TAIL(&context_free_queue, &context_array[i],
533 						  context_link);
534 #ifdef	PMAP_DEBUG
535 		if (pmap_debug & PMD_CONTEXT)
536 			printf("context_init: sizeof(context_array[0])=%d\n",
537 				   sizeof(context_array[0]));
538 #endif
539 	}
540 }
541 
542 /* Get us a context (steal one if necessary). */
543 static void
544 context_allocate(pmap)
545 	pmap_t pmap;
546 {
547 	context_t context;
548 
549 	CHECK_SPL();
550 
551 #ifdef	DIAGNOSTIC
552 	if (pmap == kernel_pmap)
553 		panic("context_allocate: kernel_pmap");
554 	if (has_context(pmap))
555 		panic("pmap: pmap already has context allocated to it");
556 #endif
557 
558 	context = TAILQ_FIRST(&context_free_queue);
559 	if (context == NULL) {
560 		/* Steal the head of the active queue. */
561 		context = TAILQ_FIRST(&context_active_queue);
562 		if (context == NULL)
563 			panic("pmap: no contexts left?");
564 #ifdef	PMAP_DEBUG
565 		if (pmap_debug & PMD_CONTEXT)
566 			printf("context_allocate: steal ctx %d from pmap %p\n",
567 				   context->context_num, context->context_upmap);
568 #endif
569 		context_free(context->context_upmap);
570 		context = TAILQ_FIRST(&context_free_queue);
571 	}
572 	TAILQ_REMOVE(&context_free_queue, context, context_link);
573 
574 #ifdef DIAGNOSTIC
575 	if (context->context_upmap != NULL)
576 		panic("pmap: context in use???");
577 #endif
578 
579 	context->context_upmap = pmap;
580 	pmap->pm_ctxnum = context->context_num;
581 
582 	TAILQ_INSERT_TAIL(&context_active_queue, context, context_link);
583 
584 	/*
585 	 * We could reload the MMU here, but that would
586 	 * artificially move PMEGs from the inactive queue
587 	 * to the active queue, so do lazy reloading.
588 	 * XXX - Need to reload wired pmegs though...
589 	 * XXX: Verify that the context is empty?
590 	 */
591 }
592 
593 /*
594  * Unload the context and put it on the free queue.
595  */
596 static void
597 context_free(pmap)		/* :) */
598 	pmap_t pmap;
599 {
600 	int saved_ctxnum, ctxnum;
601 	int i, sme;
602 	context_t contextp;
603 	vaddr_t va;
604 
605 	CHECK_SPL();
606 
607 	ctxnum = pmap->pm_ctxnum;
608 	if (ctxnum < FIRST_CONTEXT || ctxnum >= NCONTEXT)
609 		panic("pmap: context_free ctxnum");
610 	contextp = &context_array[ctxnum];
611 
612 	/* Temporary context change. */
613 	saved_ctxnum = get_context();
614 	set_context(ctxnum);
615 
616 	/* Before unloading translations, flush cache. */
617 #ifdef	HAVECACHE
618 	if (cache_size)
619 		cache_flush_context();
620 #endif
621 
622 	/* Unload MMU (but keep in SW segmap). */
623 	for (i=0, va=0; i < NUSEG; i++, va+=NBSG) {
624 
625 #if !defined(PMAP_DEBUG)
626 		/* Short-cut using the S/W segmap (if !debug). */
627 		if (pmap->pm_segmap[i] == SEGINV)
628 			continue;
629 #endif
630 
631 		/* Check the H/W segmap. */
632 		sme = get_segmap(va);
633 		if (sme == SEGINV)
634 			continue;
635 
636 		/* Found valid PMEG in the segmap. */
637 #ifdef	PMAP_DEBUG
638 		if (pmap_debug & PMD_SEGMAP)
639 			printf("pmap: set_segmap ctx=%d v=0x%lx old=0x%x new=ff (cf)\n",
640 				   ctxnum, va, sme);
641 #endif
642 #ifdef	DIAGNOSTIC
643 		if (sme != pmap->pm_segmap[i])
644 			panic("context_free: unknown sme at va=0x%lx", va);
645 #endif
646 		/* Did cache flush above (whole context). */
647 		set_segmap(va, SEGINV);
648 		/* In this case, do not clear pm_segmap. */
649 		/* XXX: Maybe inline this call? */
650 		pmeg_release(pmeg_p(sme));
651 	}
652 
653 	/* Restore previous context. */
654 	set_context(saved_ctxnum);
655 
656 	/* Dequeue, update, requeue. */
657 	TAILQ_REMOVE(&context_active_queue, contextp, context_link);
658 	pmap->pm_ctxnum = EMPTY_CONTEXT;
659 	contextp->context_upmap = NULL;
660 	TAILQ_INSERT_TAIL(&context_free_queue, contextp, context_link);
661 }
662 
663 
664 /****************************************************************
665  * PMEG management functions.
666  */
667 
668 static void
669 pmeg_init()
670 {
671 	int x;
672 
673 	/* clear pmeg array, put it all on the free pmeg queue */
674 
675 	TAILQ_INIT(&pmeg_free_queue);
676 	TAILQ_INIT(&pmeg_inactive_queue);
677 	TAILQ_INIT(&pmeg_active_queue);
678 	TAILQ_INIT(&pmeg_kernel_queue);
679 
680 	memset(pmeg_array, 0, NPMEG*sizeof(struct pmeg_state));
681 	for (x =0 ; x<NPMEG; x++) {
682 		TAILQ_INSERT_TAIL(&pmeg_free_queue, &pmeg_array[x],
683 				  pmeg_link);
684 		pmeg_array[x].pmeg_qstate = PMEGQ_FREE;
685 		pmeg_array[x].pmeg_index = x;
686 	}
687 
688 	/* The last pmeg is not usable. */
689 	pmeg_reserve(SEGINV);
690 }
691 
692 /*
693  * Reserve a pmeg (forever) for use by PROM, etc.
694  * Contents are left as-is.  Called very early...
695  */
696 void
697 pmeg_reserve(sme)
698 	int sme;
699 {
700 	pmeg_t pmegp;
701 
702 	/* Can not use pmeg_p() because it fails on SEGINV. */
703 	pmegp = &pmeg_array[sme];
704 
705 	if (pmegp->pmeg_reserved) {
706 		mon_printf("pmeg_reserve: already reserved\n");
707 		sunmon_abort();
708 	}
709 	if (pmegp->pmeg_owner) {
710 		mon_printf("pmeg_reserve: already owned\n");
711 		sunmon_abort();
712 	}
713 
714 	/* Owned by kernel, but not really usable... */
715 	pmegp->pmeg_owner = kernel_pmap;
716 	pmegp->pmeg_reserved++;	/* keep count, just in case */
717 	TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
718 	pmegp->pmeg_qstate = PMEGQ_NONE;
719 }
720 
721 /*
722  * Examine PMEGs used by the monitor, and either
723  * reserve them (keep=1) or clear them (keep=0)
724  */
725 static void
726 pmeg_mon_init(sva, eva, keep)
727 	vaddr_t sva, eva;
728 	int keep;	/* true: steal, false: clear */
729 {
730 	vaddr_t pgva, endseg;
731 	int pte, valid;
732 	unsigned char sme;
733 
734 #ifdef	PMAP_DEBUG
735 	if (pmap_debug & PMD_SEGMAP)
736 		mon_printf("pmeg_mon_init(0x%x, 0x%x, %d)\n",
737 		           sva, eva, keep);
738 #endif
739 
740 	sva &= ~(NBSG-1);
741 
742 	while (sva < eva) {
743 		sme = get_segmap(sva);
744 		if (sme != SEGINV) {
745 			valid = 0;
746 			endseg = sva + NBSG;
747 			for (pgva = sva; pgva < endseg; pgva += NBPG) {
748 				pte = get_pte(pgva);
749 				if (pte & PG_VALID) {
750 					valid++;
751 				}
752 			}
753 #ifdef	PMAP_DEBUG
754 			if (pmap_debug & PMD_SEGMAP)
755 				mon_printf(" sva=0x%x seg=0x%x valid=%d\n",
756 				           sva, sme, valid);
757 #endif
758 			if (keep && valid)
759 				pmeg_reserve(sme);
760 			else set_segmap(sva, SEGINV);
761 		}
762 		sva += NBSG;
763 	}
764 }
765 
766 /*
767  * This is used only during pmap_bootstrap, so we can
768  * get away with borrowing a slot in the segmap.
769  */
770 static void
771 pmeg_clean(pmegp)
772 	pmeg_t pmegp;
773 {
774 	int sme;
775 	vaddr_t va;
776 
777 	sme = get_segmap(0);
778 	if (sme != SEGINV)
779 		panic("pmeg_clean");
780 
781 	sme = pmegp->pmeg_index;
782 	set_segmap(0, sme);
783 
784 	for (va = 0; va < NBSG; va += NBPG)
785 		set_pte(va, PG_INVAL);
786 
787 	set_segmap(0, SEGINV);
788 }
789 
790 /*
791  * This routine makes sure that pmegs on the pmeg_free_queue contain
792  * no valid ptes.  It pulls things off the queue, cleans them, and
793  * puts them at the end.  The ending condition is finding the first
794  * queue element at the head of the queue again.
795  */
796 static void
797 pmeg_clean_free()
798 {
799 	pmeg_t pmegp, pmegp_first;
800 
801 	pmegp = TAILQ_FIRST(&pmeg_free_queue);
802 	if (pmegp == NULL)
803 		panic("pmap: no free pmegs available to clean");
804 
805 	pmegp_first = NULL;
806 
807 	for (;;) {
808 		pmegp = TAILQ_FIRST(&pmeg_free_queue);
809 		TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
810 
811 		pmegp->pmeg_qstate = PMEGQ_NONE;
812 		pmeg_clean(pmegp);
813 		pmegp->pmeg_qstate = PMEGQ_FREE;
814 
815 		TAILQ_INSERT_TAIL(&pmeg_free_queue, pmegp, pmeg_link);
816 
817 		if (pmegp == pmegp_first)
818 			break;
819 		if (pmegp_first == NULL)
820 			pmegp_first = pmegp;
821 	}
822 }
823 
824 /*
825  * Allocate a PMEG by whatever means necessary.
826  * (May invalidate some mappings!)
827  */
828 static pmeg_t
829 pmeg_allocate(pmap, va)
830 	pmap_t pmap;
831 	vaddr_t va;
832 {
833 	pmeg_t pmegp;
834 
835 	CHECK_SPL();
836 
837 #ifdef	DIAGNOSTIC
838 	if (va & SEGOFSET) {
839 		panic("pmap:pmeg_allocate: va=0x%lx", va);
840 	}
841 #endif
842 
843 	/* Get one onto the free list if necessary. */
844 	pmegp = TAILQ_FIRST(&pmeg_free_queue);
845 	if (!pmegp) {
846 		/* Try inactive queue... */
847 		pmegp = TAILQ_FIRST(&pmeg_inactive_queue);
848 		if (!pmegp) {
849 			/* Try active queue... */
850 			pmegp = TAILQ_FIRST(&pmeg_active_queue);
851 		}
852 		if (!pmegp) {
853 			panic("pmeg_allocate: failed");
854 		}
855 		/*
856 		 * Remove mappings to free-up a pmeg
857 		 * (so it will go onto the free list).
858 		 * XXX - Skip this one if it is wired?
859 		 */
860 		pmap_remove1(pmegp->pmeg_owner,
861 		             pmegp->pmeg_va,
862 		             pmegp->pmeg_va + NBSG);
863 	}
864 
865 	/* OK, free list has something for us to take. */
866 	pmegp = TAILQ_FIRST(&pmeg_free_queue);
867 #ifdef	DIAGNOSTIC
868 	if (pmegp == NULL)
869 		panic("pmeg_allocate: still none free?");
870 	if ((pmegp->pmeg_qstate != PMEGQ_FREE) ||
871 		(pmegp->pmeg_index == SEGINV) ||
872 		(pmegp->pmeg_vpages))
873 		panic("pmeg_allocate: bad pmegp=%p", pmegp);
874 #endif
875 #ifdef	PMAP_DEBUG
876 	if (pmegp->pmeg_index == pmap_db_watchpmeg) {
877 		db_printf("pmeg_allocate: watch pmegp=%p\n", pmegp);
878 		Debugger();
879 	}
880 #endif
881 
882 	TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
883 
884 	/* Reassign this PMEG for the caller. */
885 	pmegp->pmeg_owner = pmap;
886 	pmegp->pmeg_version = pmap->pm_version;
887 	pmegp->pmeg_va = va;
888 	pmegp->pmeg_wired = 0;
889 	pmegp->pmeg_reserved  = 0;
890 	pmegp->pmeg_vpages  = 0;
891 	if (pmap == kernel_pmap) {
892 		TAILQ_INSERT_TAIL(&pmeg_kernel_queue, pmegp, pmeg_link);
893 		pmegp->pmeg_qstate = PMEGQ_KERNEL;
894 	} else {
895 		TAILQ_INSERT_TAIL(&pmeg_active_queue, pmegp, pmeg_link);
896 		pmegp->pmeg_qstate = PMEGQ_ACTIVE;
897 	}
898 	/* Caller will verify that it's empty (if debugging). */
899 	return pmegp;
900 }
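
/*
 * Note the allocation order above: use a PMEG from the free
 * queue if one exists; otherwise evict the oldest inactive (or,
 * failing that, active) PMEG by removing its mappings with
 * pmap_remove1(), which puts it back on the free queue.
 */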
901 
902 /*
903  * Put pmeg on the inactive queue, leaving its contents intact.
904  * This happens when we lose our context.  We may reclaim
905  * this pmeg later if it is still in the inactive queue.
906  */
907 static void
908 pmeg_release(pmegp)
909 	pmeg_t pmegp;
910 {
911 
912 	CHECK_SPL();
913 
914 #ifdef	DIAGNOSTIC
915 	if ((pmegp->pmeg_owner == kernel_pmap) ||
916 		(pmegp->pmeg_qstate != PMEGQ_ACTIVE))
917 		panic("pmeg_release: bad pmeg=%p", pmegp);
918 #endif
919 
920 	TAILQ_REMOVE(&pmeg_active_queue, pmegp, pmeg_link);
921 	pmegp->pmeg_qstate = PMEGQ_INACTIVE;
922 	TAILQ_INSERT_TAIL(&pmeg_inactive_queue, pmegp, pmeg_link);
923 }
924 
925 /*
926  * Move the pmeg to the free queue from wherever it is.
927  * The pmeg will be clean.  It might be in kernel_pmap.
928  */
929 static void
930 pmeg_free(pmegp)
931 	pmeg_t pmegp;
932 {
933 
934 	CHECK_SPL();
935 
936 #ifdef	DIAGNOSTIC
937 	/* Caller should verify that it's empty. */
938 	if (pmegp->pmeg_vpages != 0)
939 		panic("pmeg_free: vpages");
940 #endif
941 
942 	switch (pmegp->pmeg_qstate) {
943 	case PMEGQ_ACTIVE:
944 		TAILQ_REMOVE(&pmeg_active_queue, pmegp, pmeg_link);
945 		break;
946 	case PMEGQ_INACTIVE:
947 		TAILQ_REMOVE(&pmeg_inactive_queue, pmegp, pmeg_link);
948 		break;
949 	case PMEGQ_KERNEL:
950 		TAILQ_REMOVE(&pmeg_kernel_queue, pmegp, pmeg_link);
951 		break;
952 	default:
953 		panic("pmeg_free: releasing bad pmeg");
954 		break;
955 	}
956 
957 #ifdef	PMAP_DEBUG
958 	if (pmegp->pmeg_index == pmap_db_watchpmeg) {
959 		db_printf("pmeg_free: watch pmeg 0x%x\n",
960 			   pmegp->pmeg_index);
961 		Debugger();
962 	}
963 #endif
964 
965 	pmegp->pmeg_owner = NULL;
966 	pmegp->pmeg_qstate = PMEGQ_FREE;
967 	TAILQ_INSERT_TAIL(&pmeg_free_queue, pmegp, pmeg_link);
968 }
969 
970 /*
971  * Find a PMEG that was put on the inactive queue when we
972  * had our context stolen.  If found, move to active queue.
973  */
974 static pmeg_t
975 pmeg_cache(pmap, va)
976 	pmap_t pmap;
977 	vaddr_t va;
978 {
979 	int sme, segnum;
980 	pmeg_t pmegp;
981 
982 	CHECK_SPL();
983 
984 #ifdef	DIAGNOSTIC
985 	if (pmap == kernel_pmap)
986 		panic("pmeg_cache: kernel_pmap");
987 	if (va & SEGOFSET) {
988 		panic("pmap:pmeg_cache: va=0x%lx", va);
989 	}
990 #endif
991 
992 	if (pmap->pm_segmap == NULL)
993 		return PMEG_NULL;
994 
995 	segnum = VA_SEGNUM(va);
996 	if (segnum > NUSEG)		/* out of range */
997 		return PMEG_NULL;
998 
999 	sme = pmap->pm_segmap[segnum];
1000 	if (sme == SEGINV)	/* nothing cached */
1001 		return PMEG_NULL;
1002 
1003 	pmegp = pmeg_p(sme);
1004 
1005 #ifdef	PMAP_DEBUG
1006 	if (pmegp->pmeg_index == pmap_db_watchpmeg) {
1007 		db_printf("pmeg_cache: watch pmeg 0x%x\n", pmegp->pmeg_index);
1008 		Debugger();
1009 	}
1010 #endif
1011 
1012 	/*
1013 	 * Our segmap named a PMEG.  If it is no longer ours,
1014 	 * invalidate that entry in our segmap and return NULL.
1015 	 */
1016 	if ((pmegp->pmeg_owner != pmap) ||
1017 		(pmegp->pmeg_version != pmap->pm_version) ||
1018 		(pmegp->pmeg_va != va))
1019 	{
1020 #ifdef	PMAP_DEBUG
1021 		db_printf("pmap:pmeg_cache: invalid pmeg: sme=0x%x\n", sme);
1022 		pmeg_print(pmegp);
1023 		Debugger();
1024 #endif
1025 		pmap->pm_segmap[segnum] = SEGINV;
1026 		return PMEG_NULL; /* cache lookup failed */
1027 	}
1028 
1029 #ifdef	DIAGNOSTIC
1030 	/* Make sure it is on the inactive queue. */
1031 	if (pmegp->pmeg_qstate != PMEGQ_INACTIVE)
1032 		panic("pmeg_cache: pmeg was taken: %p", pmegp);
1033 #endif
1034 
1035 	TAILQ_REMOVE(&pmeg_inactive_queue, pmegp, pmeg_link);
1036 	pmegp->pmeg_qstate = PMEGQ_ACTIVE;
1037 	TAILQ_INSERT_TAIL(&pmeg_active_queue, pmegp, pmeg_link);
1038 
1039 	return pmegp;
1040 }
1041 
1042 #ifdef	PMAP_DEBUG
1043 static void
1044 pmeg_verify_empty(va)
1045 	vaddr_t va;
1046 {
1047 	vaddr_t eva;
1048 	int pte;
1049 
1050 	for (eva = va + NBSG;  va < eva; va += NBPG) {
1051 		pte = get_pte(va);
1052 		if (pte & PG_VALID)
1053 			panic("pmeg_verify_empty");
1054 	}
1055 }
1056 #endif	/* PMAP_DEBUG */
1057 
1058 
1059 /****************************************************************
1060  * Physical-to-virtual lookup support
1061  *
1062  * Need memory for the pv_alloc/pv_free list heads
1063  * and elements.  We know how many to allocate since
1064  * there is one list head for each physical page, and
1065  * at most one element for each PMEG slot.
1066  */
1067 static void
1068 pv_init()
1069 {
1070 	int npp, nvp, sz;
1071 	pv_entry_t pv;
1072 	char *p;
1073 
1074 	/* total allocation size */
1075 	sz = 0;
1076 
1077 	/*
1078 	 * Data for each physical page.
1079 	 * Each "mod/ref" flag is a char.
1080 	 * Each PV head is a pointer.
1081 	 * Note physmem is in pages.
1082 	 */
1083 	npp = ALIGN(physmem);
1084 	sz += (npp * sizeof(*pv_flags_tbl));
1085 	sz += (npp * sizeof(*pv_head_tbl));
1086 
1087 	/*
1088 	 * Data for each virtual page (all PMEGs).
1089 	 * One pv_entry for each PTE slot in each PMEG.
1090 	 */
1091 	nvp = NPMEG * NPAGSEG;
1092 	sz += (nvp * sizeof(*pv_free_list));
1093 
1094 	/* Now allocate the whole thing. */
1095 	sz = m68k_round_page(sz);
1096 	p = (char *)uvm_km_alloc(kernel_map, sz);
1097 	if (p == NULL)
1098 		panic("pmap:pv_init: alloc failed");
1099 	memset(p, 0, sz);
1100 
1101 	/* Now divide up the space. */
1102 	pv_flags_tbl = (void *) p;
1103 	p += (npp * sizeof(*pv_flags_tbl));
1104 	pv_head_tbl = (void*) p;
1105 	p += (npp * sizeof(*pv_head_tbl));
1106 	pv_free_list = (void *)p;
1107 	p += (nvp * sizeof(*pv_free_list));
1108 
1109 	/* Finally, make pv_free_list into a list. */
1110 	for (pv = pv_free_list; (char *)pv < p; pv++)
1111 		pv->pv_next = &pv[1];
1112 	pv[-1].pv_next = 0;
1113 
1114 	pv_initialized++;
1115 }
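
/*
 * Rough sizing example: with 8KB pages, a 16MB machine has
 * physmem of about 2048, giving 2048 flag bytes plus 2048 head
 * pointers above, while the element pool is always
 * NPMEG * NPAGSEG (256 * 16 = 4096) entries regardless of RAM.
 */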
1116 
1117 /*
1118  * Set or clear bits in all PTEs mapping a page.
1119  * Also does syncflags work while we are there...
1120  */
1121 static void
1122 pv_changepte(pa, set_bits, clear_bits)
1123 	paddr_t pa;
1124 	int set_bits;
1125 	int clear_bits;
1126 {
1127 	pv_entry_t *head, pv;
1128 	u_char *pv_flags;
1129 	pmap_t pmap;
1130 	vaddr_t va;
1131 	int pte, sme;
1132 	int saved_ctx;
1133 	boolean_t in_ctx;
1134 	u_int flags;
1135 
1136 	pv_flags = pa_to_pvflags(pa);
1137 	head     = pa_to_pvhead(pa);
1138 
1139 	/* If no mappings, no work to do. */
1140 	if (*head == NULL)
1141 		return;
1142 
1143 #ifdef	DIAGNOSTIC
1144 	/* This function should only clear these bits: */
1145 	if (clear_bits & ~(PG_WRITE | PG_NC | PG_REF | PG_MOD))
1146 		panic("pv_changepte: clear=0x%x", clear_bits);
1147 #endif
1148 
1149 	flags = 0;
1150 	saved_ctx = get_context();
1151 	for (pv = *head; pv != NULL; pv = pv->pv_next) {
1152 		pmap = pv->pv_pmap;
1153 		va = pv->pv_va;
1154 
1155 #ifdef	DIAGNOSTIC
1156 		if (pmap->pm_segmap == NULL)
1157 			panic("pv_changepte: null segmap");
1158 #endif
1159 
1160 		/* Is the PTE currently accessible in some context? */
1161 		in_ctx = FALSE;
1162 		sme = SEGINV;	/* kill warning */
1163 		if (pmap == kernel_pmap)
1164 			in_ctx = TRUE;
1165 		else if (has_context(pmap)) {
1166 			/* PMEG may be inactive. */
1167 			set_context(pmap->pm_ctxnum);
1168 			sme = get_segmap(va);
1169 			if (sme != SEGINV)
1170 				in_ctx = TRUE;
1171 		}
1172 
1173 		if (in_ctx == TRUE) {
1174 			/*
1175 			 * The PTE is in the current context.
1176 			 * Make sure PTE is up-to-date with VAC.
1177 			 */
1178 #ifdef	HAVECACHE
1179 			if (cache_size)
1180 				cache_flush_page(va);
1181 #endif
1182 			pte = get_pte(va);
1183 		} else {
1184 
1185 			/*
1186 			 * The PTE is not in any context.
1187 			 */
1188 
1189 			sme = pmap->pm_segmap[VA_SEGNUM(va)];
1190 #ifdef	DIAGNOSTIC
1191 			if (sme == SEGINV)
1192 				panic("pv_changepte: SEGINV");
1193 #endif
1194 			pte = get_pte_pmeg(sme, VA_PTE_NUM(va));
1195 		}
1196 
1197 #ifdef	DIAGNOSTIC
1198 		/* PV entries point only to valid mappings. */
1199 		if ((pte & PG_VALID) == 0)
1200 			panic("pv_changepte: not PG_VALID at va=0x%lx", va);
1201 #endif
1202 		/* Get these while it's easy. */
1203 		if (pte & PG_MODREF) {
1204 			flags |= (pte & PG_MODREF);
1205 			pte &= ~PG_MODREF;
1206 		}
1207 
1208 		/* Finally, set and clear some bits. */
1209 		pte |= set_bits;
1210 		pte &= ~clear_bits;
1211 
1212 		if (in_ctx == TRUE) {
1213 			/* Did cache flush above. */
1214 			set_pte(va, pte);
1215 		} else {
1216 			set_pte_pmeg(sme, VA_PTE_NUM(va), pte);
1217 		}
1218 	}
1219 	set_context(saved_ctx);
1220 
1221 	*pv_flags |= (flags >> PV_SHIFT);
1222 }
1223 
1224 /*
1225  * Return the ref and mod bits from the pvlist,
1226  * and turn off those same bits in the hardware PTEs.
1227  */
1228 static u_int
1229 pv_syncflags(pv)
1230 	pv_entry_t pv;
1231 {
1232 	pmap_t pmap;
1233 	vaddr_t va;
1234 	int pte, sme;
1235 	int saved_ctx;
1236 	boolean_t in_ctx;
1237 	u_int flags;
1238 
1239 	/* If no mappings, no work to do. */
1240 	if (pv == NULL)
1241 		return (0);
1242 
1243 	flags = 0;
1244 	saved_ctx = get_context();
1245 	for (; pv != NULL; pv = pv->pv_next) {
1246 		pmap = pv->pv_pmap;
1247 		va = pv->pv_va;
1248 		sme = SEGINV;
1249 
1250 #ifdef	DIAGNOSTIC
1251 		/*
1252 		 * Only the head may have a null pmap, and
1253 		 * we checked for that above.
1254 		 */
1255 		if (pmap->pm_segmap == NULL)
1256 			panic("pv_syncflags: null segmap");
1257 #endif
1258 
1259 		/* Is the PTE currently accessible in some context? */
1260 		in_ctx = FALSE;
1261 		if (pmap == kernel_pmap)
1262 			in_ctx = TRUE;
1263 		else if (has_context(pmap)) {
1264 			/* PMEG may be inactive. */
1265 			set_context(pmap->pm_ctxnum);
1266 			sme = get_segmap(va);
1267 			if (sme != SEGINV)
1268 				in_ctx = TRUE;
1269 		}
1270 
1271 		if (in_ctx == TRUE) {
1272 
1273 			/*
1274 			 * The PTE is in the current context.
1275 			 * Make sure PTE is up-to-date with VAC.
1276 			 */
1277 
1278 #ifdef	HAVECACHE
1279 			if (cache_size)
1280 				cache_flush_page(va);
1281 #endif
1282 			pte = get_pte(va);
1283 		} else {
1284 
1285 			/*
1286 			 * The PTE is not in any context.
1287 			 */
1288 
1289 			sme = pmap->pm_segmap[VA_SEGNUM(va)];
1290 #ifdef	DIAGNOSTIC
1291 			if (sme == SEGINV)
1292 				panic("pv_syncflags: SEGINV");
1293 #endif
1294 			pte = get_pte_pmeg(sme, VA_PTE_NUM(va));
1295 		}
1296 
1297 #ifdef	DIAGNOSTIC
1298 		/* PV entries point only to valid mappings. */
1299 		if ((pte & PG_VALID) == 0)
1300 			panic("pv_syncflags: not PG_VALID at va=0x%lx", va);
1301 #endif
1302 		/* OK, do what we came here for... */
1303 		if (pte & PG_MODREF) {
1304 			flags |= (pte & PG_MODREF);
1305 			pte &= ~PG_MODREF;
1306 		}
1307 
1308 		if (in_ctx == TRUE) {
1309 			/* Did cache flush above. */
1310 			set_pte(va, pte);
1311 		} else {
1312 			set_pte_pmeg(sme, VA_PTE_NUM(va), pte);
1313 		}
1314 	}
1315 	set_context(saved_ctx);
1316 
1317 	return (flags >> PV_SHIFT);
1318 }
1319 
1320 /* Remove all mappings for the physical page. */
1321 static void
1322 pv_remove_all(pa)
1323 	paddr_t pa;
1324 {
1325 	pv_entry_t *head, pv;
1326 	pmap_t pmap;
1327 	vaddr_t va;
1328 
1329 	CHECK_SPL();
1330 
1331 #ifdef PMAP_DEBUG
1332 	if (pmap_debug & PMD_REMOVE)
1333 		printf("pv_remove_all(0x%lx)\n", pa);
1334 #endif
1335 
1336 	head = pa_to_pvhead(pa);
1337 	while ((pv = *head) != NULL) {
1338 		pmap = pv->pv_pmap;
1339 		va   = pv->pv_va;
1340 		pmap_remove1(pmap, va, va + NBPG);
1341 #ifdef PMAP_DEBUG
1342 		/* Make sure it went away. */
1343 		if (pv == *head) {
1344 			db_printf("pv_remove_all: head unchanged for pa=0x%lx\n", pa);
1345 			Debugger();
1346 		}
1347 #endif
1348 	}
1349 }
1350 
1351 /*
1352  * The pmap system is asked to lookup all mappings that point to a
1353  * given physical memory address.  This function adds a new element
1354  * to the list of mappings maintained for the given physical address.
1355  * Returns PV_NC if the (new) pvlist says that the address cannot
1356  * be cached.
1357  */
1358 static int
1359 pv_link(pmap, pte, va)
1360 	pmap_t pmap;
1361 	int pte;
1362 	vaddr_t va;
1363 {
1364 	paddr_t pa;
1365 	pv_entry_t *head, pv;
1366 	u_char *pv_flags;
1367 	int flags;
1368 
1369 	if (!pv_initialized)
1370 		return 0;
1371 
1372 	CHECK_SPL();
1373 
1374 	/* Only the non-cached bit is of interest here. */
1375 	flags = (pte & PG_NC) ? PV_NC : 0;
1376 	pa = PG_PA(pte);
1377 
1378 #ifdef PMAP_DEBUG
1379 	if ((pmap_debug & PMD_LINK) || (va == pmap_db_watchva)) {
1380 		printf("pv_link(%p, 0x%x, 0x%lx)\n", pmap, pte, va);
1381 		/* pv_print(pa); */
1382 	}
1383 #endif
1384 
1385 	pv_flags = pa_to_pvflags(pa);
1386 	head     = pa_to_pvhead(pa);
1387 
1388 #ifdef	DIAGNOSTIC
1389 	/* See if this mapping is already in the list. */
1390 	for (pv = *head; pv != NULL; pv = pv->pv_next) {
1391 		if ((pv->pv_pmap == pmap) && (pv->pv_va == va))
1392 			panic("pv_link: duplicate entry for PA=0x%lx", pa);
1393 	}
1394 #endif
1395 #ifdef HAVECACHE
1396 
1397 	/*
1398 	 * Does this new mapping cause VAC alias problems?
1399 	 */
1400 
1401 	*pv_flags |= flags;
1402 	if ((*pv_flags & PV_NC) == 0) {
1403 		for (pv = *head; pv != NULL; pv = pv->pv_next) {
1404 			if (BADALIAS(va, pv->pv_va)) {
1405 				*pv_flags |= PV_NC;
1406 				pv_changepte(pa, PG_NC, 0);
1407 				pmap_stats.ps_vac_uncached++;
1408 				break;
1409 			}
1410 		}
1411 	}
1412 #endif
1413 
1414 	/* Allocate a PV element (pv_alloc()). */
1415 	pv = pv_free_list;
1416 	if (pv == NULL)
1417 		panic("pv_link: pv_alloc");
1418 	pv_free_list = pv->pv_next;
1419 	pv->pv_next = 0;
1420 
1421 	/* Insert new entry at the head. */
1422 	pv->pv_pmap = pmap;
1423 	pv->pv_va   = va;
1424 	pv->pv_next = *head;
1425 	*head = pv;
1426 
1427 	return (*pv_flags & PV_NC);
1428 }
1429 
1430 /*
1431  * pv_unlink is a helper function for pmap_remove.
1432  * It removes the appropriate (pmap, pa, va) entry.
1433  *
1434  * Once the entry is removed, if the pv_table head has the cache
1435  * inhibit bit set, see if we can turn that off; if so, walk the
1436  * pvlist and turn off PG_NC in each PTE.  (The pvlist is by
1437  * definition nonempty, since it must have at least two elements
1438  * in it to have PV_NC set, and we only remove one here.)
1439  */
1440 static void
1441 pv_unlink(pmap, pte, va)
1442 	pmap_t pmap;
1443 	int pte;
1444 	vaddr_t va;
1445 {
1446 	paddr_t pa;
1447 	pv_entry_t *head, *ppv, pv;
1448 	u_char *pv_flags;
1449 
1450 	CHECK_SPL();
1451 
1452 	pa = PG_PA(pte);
1453 #ifdef PMAP_DEBUG
1454 	if ((pmap_debug & PMD_LINK) || (va == pmap_db_watchva)) {
1455 		printf("pv_unlink(%p, 0x%x, 0x%lx)\n", pmap, pte, va);
1456 		/* pv_print(pa); */
1457 	}
1458 #endif
1459 
1460 	pv_flags = pa_to_pvflags(pa);
1461 	head     = pa_to_pvhead(pa);
1462 
1463 	/*
1464 	 * Find the entry.
1465 	 */
1466 	ppv = head;
1467 	pv = *ppv;
1468 	while (pv) {
1469 		if ((pv->pv_pmap == pmap) && (pv->pv_va == va))
1470 			goto found;
1471 		ppv = &pv->pv_next;
1472 		pv  =  pv->pv_next;
1473 	}
1474 #ifdef PMAP_DEBUG
1475 	db_printf("pv_unlink: not found (pa=0x%lx,va=0x%lx)\n", pa, va);
1476 	Debugger();
1477 #endif
1478 	return;
1479 
1480 found:
1481 	/* Unlink this entry from the list and clear it. */
1482 	*ppv = pv->pv_next;
1483 	pv->pv_pmap = NULL;
1484 	pv->pv_va   = 0;
1485 
1486 	/* Insert it on the head of the free list. (pv_free()) */
1487 	pv->pv_next = pv_free_list;
1488 	pv_free_list = pv;
1489 	pv = NULL;
1490 
1491 	/* Do any non-cached mappings remain? */
1492 	if ((*pv_flags & PV_NC) == 0)
1493 		return;
1494 	if ((pv = *head) == NULL)
1495 		return;
1496 
1497 	/*
1498 	 * Have non-cached mappings.  See if we can fix that now.
1499 	 */
1500 	va = pv->pv_va;
1501 	for (pv = pv->pv_next; pv != NULL; pv = pv->pv_next) {
1502 		/* If there is a DVMA mapping, leave it NC. */
1503 		if (va >= DVMA_MAP_BASE)
1504 			return;
1505 		/* If there are VAC alias problems, leave NC. */
1506 		if (BADALIAS(va, pv->pv_va))
1507 			return;
1508 	}
1509 	/* OK, there are no "problem" mappings. */
1510 	*pv_flags &= ~PV_NC;
1511 	pv_changepte(pa, 0, PG_NC);
1512 	pmap_stats.ps_vac_recached++;
1513 }
1514 
1515 
1516 /****************************************************************
1517  * Bootstrap and Initialization, etc.
1518  */
1519 
1520 void
1521 pmap_common_init(pmap)
1522 	pmap_t pmap;
1523 {
1524 	memset(pmap, 0, sizeof(struct pmap));
1525 	pmap->pm_refcount = 1;
1526 	pmap->pm_version = pmap_version++;
1527 	pmap->pm_ctxnum = EMPTY_CONTEXT;
1528 	simple_lock_init(&pmap->pm_lock);
1529 }
1530 
1531 /*
1532  * Prepare the kernel for VM operations.
1533  * This is called by locore2.c:_vm_init()
1534  * after the "start/end" globals are set.
1535  * This function must NOT leave context zero.
1536  */
1537 void
1538 pmap_bootstrap(nextva)
1539 	vaddr_t nextva;
1540 {
1541 	struct sunromvec *rvec;
1542 	vaddr_t va, eva;
1543 	int i, pte, sme;
1544 	extern char etext[];
1545 
1546 	nextva = m68k_round_page(nextva);
1547 	rvec = romVectorPtr;
1548 
1549 	/* Steal some special-purpose, already mapped pages? */
1550 
1551 	/*
1552 	 * Determine the range of kernel virtual space available.
1553 	 * It is segment-aligned to simplify PMEG management.
1554 	 */
1555 	virtual_avail = m68k_round_seg(nextva);
1556 	virtual_end = VM_MAX_KERNEL_ADDRESS;
1557 
1558 	/*
1559 	 * Determine the range of physical memory available.
1560 	 * Physical memory at zero was remapped to KERNBASE.
1561 	 */
1562 	avail_start = nextva - KERNBASE;
1563 	if (rvec->romvecVersion < 1) {
1564 		mon_printf("Warning: ancient PROM version=%d\n",
1565 				   rvec->romvecVersion);
1566 		/* Guess that PROM version 0.X used two pages. */
1567 		avail_end = *rvec->memorySize - (2*NBPG);
1568 	} else {
1569 		/* PROM version 1 or later. */
1570 		avail_end = *rvec->memoryAvail;
1571 	}
1572 	avail_end = m68k_trunc_page(avail_end);
1573 
1574 	/*
1575 	 * Report the actual amount of physical memory,
1576 	 * even though the PROM takes a few pages.
1577 	 */
1578 	physmem = (btoc(avail_end) + 0xF) & ~0xF;
1579 
1580 	/*
1581 	 * On the Sun3/50, the video frame buffer is located at
1582  * physical address 1MB, so we must step over it.
1583 	 */
1584 	if (cpu_machine_id == SUN3_MACH_50) {
1585 		hole_start = m68k_trunc_page(OBMEM_BW50_ADDR);
1586 		hole_size  = m68k_round_page(OBMEM_BW2_SIZE);
1587 		if (avail_start > hole_start) {
1588 			mon_printf("kernel too large for Sun3/50\n");
1589 			sunmon_abort();
1590 		}
1591 	}
1592 
1593 	/*
1594 	 * Done allocating PAGES of virtual space, so
1595 	 * clean out the rest of the last used segment.
1596 	 */
1597 	for (va = nextva; va < virtual_avail; va += NBPG)
1598 		set_pte(va, PG_INVAL);
1599 
1600 	/*
1601 	 * Now that we are done stealing physical pages, etc.
1602 	 * figure out which PMEGs are used by those mappings
1603 	 * and either reserve them or clear them out.
1604 	 * -- but first, init PMEG management.
1605 	 * This puts all PMEGs in the free list.
1606 	 * We will allocate the in-use ones.
1607 	 */
1608 	pmeg_init();
1609 
1610 	/*
1611 	 * Unmap user virtual segments.
1612 	 * VA range: [0 .. KERNBASE]
1613 	 */
1614 	for (va = 0; va < KERNBASE; va += NBSG)
1615 		set_segmap(va, SEGINV);
1616 
1617 	/*
1618 	 * Reserve PMEGS for kernel text/data/bss
1619 	 * and the misc pages taken above.
1620 	 * VA range: [KERNBASE .. virtual_avail]
1621 	 */
1622 	for ( ; va < virtual_avail; va += NBSG) {
1623 		sme = get_segmap(va);
1624 		if (sme == SEGINV) {
1625 			mon_printf("kernel text/data/bss not mapped\n");
1626 			sunmon_abort();
1627 		}
1628 		pmeg_reserve(sme);
1629 	}
1630 
1631 	/*
1632 	 * Unmap kernel virtual space.  Make sure to leave no valid
1633 	 * segmap entries in the MMU unless pmeg_array records them.
1634 	 * VA range: [virtual_avail .. virtual_end]
1635 	 */
1636 	for ( ; va < virtual_end; va += NBSG)
1637 		set_segmap(va, SEGINV);
1638 
1639 	/*
1640 	 * Reserve PMEGs used by the PROM monitor (device mappings).
1641 	 * Free up any pmegs in this range which have no mappings.
1642 	 * VA range: [0x0FE00000 .. 0x0FF00000]
1643 	 */
1644 	pmeg_mon_init(SUN3_MONSTART, SUN3_MONEND, TRUE);
1645 
1646 	/*
1647 	 * Unmap any pmegs left in DVMA space by the PROM.
1648 	 * DO NOT kill the last one! (owned by the PROM!)
1649 	 * VA range: [0x0FF00000 .. 0x0FFE0000]
1650 	 */
1651 	pmeg_mon_init(SUN3_MONEND, SUN3_MONSHORTSEG, FALSE);
1652 
1653 	/*
1654 	 * MONSHORTSEG contains MONSHORTPAGE which is a data page
1655 	 * allocated by the PROM monitor.  Reserve the segment,
1656 	 * but clear out all but the last PTE inside it.
1657 	 * Note we use this for tmp_vpages.
1658 	 */
1659 	va  = SUN3_MONSHORTSEG;
1660 	eva = SUN3_MONSHORTPAGE;
1661 	sme = get_segmap(va);
1662 	pmeg_reserve(sme);
1663 	for ( ; va < eva; va += NBPG)
1664 		set_pte(va, PG_INVAL);
1665 
1666 	/*
1667 	 * Done reserving PMEGs and/or clearing out mappings.
1668 	 *
1669 	 * Now verify the mapping protections and such for the
1670 	 * important parts of the address space (in VA order).
1671 	 * Note that the Sun PROM usually leaves the memory
1672 	 * mapped with everything non-cached...
1673 	 */
1674 
1675 	/*
1676 	 * Map the message buffer page at a constant location
1677 	 * (physical address zero) so its contents will be
1678 	 * preserved through a reboot.
1679 	 */
1680 	va = KERNBASE;
1681 	pte = get_pte(va);
1682 	pte |= (PG_SYSTEM | PG_WRITE | PG_NC);
1683 	set_pte(va, pte);
1684 	va += NBPG;
1685 	/* Initialize msgbufaddr later, in machdep.c */
1686 
1687 	/* Next is the tmpstack page. */
1688 	pte = get_pte(va);
1689 	pte &= ~(PG_NC);
1690 	pte |= (PG_SYSTEM | PG_WRITE);
1691 	set_pte(va, pte);
1692 	va += NBPG;
1693 
1694 	/*
1695 	 * Next is the kernel text.
1696 	 *
1697 	 * Verify protection bits on kernel text/data/bss
1698 	 * All of kernel text, data, and bss are cached.
1699 	 * Text is read-only (except in db_write_ktext).
1700 	 */
1701 	eva = m68k_trunc_page(etext);
1702 	while (va < eva) {
1703 		pte = get_pte(va);
1704 		if ((pte & (PG_VALID|PG_TYPE)) != PG_VALID) {
1705 			mon_printf("invalid page at 0x%x\n", va);
1706 		}
1707 		pte &= ~(PG_WRITE|PG_NC);
1708 		/* Kernel text is read-only */
1709 		pte |= (PG_SYSTEM);
1710 		set_pte(va, pte);
1711 		va += NBPG;
1712 	}
1713 	/* data, bss, etc. */
1714 	while (va < nextva) {
1715 		pte = get_pte(va);
1716 		if ((pte & (PG_VALID|PG_TYPE)) != PG_VALID) {
1717 			mon_printf("invalid page at 0x%x\n", va);
1718 		}
1719 		pte &= ~(PG_NC);
1720 		pte |= (PG_SYSTEM | PG_WRITE);
1721 		set_pte(va, pte);
1722 		va += NBPG;
1723 	}
1724 
1725 	/*
1726 	 * Duplicate all mappings in the current context into
1727 	 * every other context.  We have to let the PROM do the
1728 	 * actual segmap manipulation because we can only switch
1729 	 * the MMU context after we are sure that the kernel is
1730 	 * identically mapped in all contexts.  The PROM can do
1731 	 * the job using hardware-dependent tricks...
1732 	 */
1733 #ifdef	DIAGNOSTIC
1734 	/* Note: PROM setcxsegmap function needs sfc=dfc=FC_CONTROL */
1735 	if ((getsfc() != FC_CONTROL) || (getdfc() != FC_CONTROL)) {
1736 		mon_printf("pmap_bootstrap: bad dfc or sfc\n");
1737 		sunmon_abort();
1738 	}
1739 	/* Near the beginning of locore.s we set context zero. */
1740 	if (get_context() != 0) {
1741 		mon_printf("pmap_bootstrap: not in context zero?\n");
1742 		sunmon_abort();
1743 	}
1744 #endif	/* DIAGNOSTIC */
1745 	for (va = 0; va < (vaddr_t) (NBSG * NSEGMAP); va += NBSG) {
1746 		/* Read the segmap entry from context zero... */
1747 		sme = get_segmap(va);
1748 		/* ... then copy it into all other contexts. */
1749 		for (i = 1; i < NCONTEXT; i++) {
1750 			(*rvec->setcxsegmap)(i, va, sme);
1751 		}
1752 	}
1753 
1754 	/*
1755 	 * Reserve a segment for the kernel to use to access a pmeg
1756 	 * that is not currently mapped into any context/segmap.
1757 	 * The kernel temporarily maps such a pmeg into this segment.
1758 	 *
1759 	 * XXX: Now that context zero is reserved as kernel-only,
1760 	 * we could borrow context zero for these temporary uses.
1761 	 */
1762 	temp_seg_va = virtual_avail;
1763 	virtual_avail += NBSG;
1764 #ifdef	DIAGNOSTIC
1765 	if (temp_seg_va & SEGOFSET) {
1766 		mon_printf("pmap_bootstrap: temp_seg_va\n");
1767 		sunmon_abort();
1768 	}
1769 #endif
1770 
1771 	/* Initialization for pmap_next_page() */
1772 	avail_next = avail_start;
1773 
1774 	uvmexp.pagesize = NBPG;
1775 	uvm_setpagesize();
1776 
1777 	/* after setting up some structures */
1778 
1779 	pmap_common_init(kernel_pmap);
1780 	pmap_kernel_init(kernel_pmap);
1781 
1782 	context_init();
1783 
1784 	pmeg_clean_free();
1785 
1786 	pmap_page_upload();
1787 }
1788 
1789 /*
1790  * Give the kernel pmap a segmap, just so there are not
1791  * so many special cases required.  Maybe faster too,
1792  * because this lets pmap_remove() and pmap_protect()
1793  * use a S/W copy of the segmap to avoid function calls.
1794  */
1795 void
1796 pmap_kernel_init(pmap)
1797 	 pmap_t pmap;
1798 {
1799 	vaddr_t va;
1800 	int i, sme;
1801 
1802 	for (i=0, va=0; i < NSEGMAP; i++, va+=NBSG) {
1803 		sme = get_segmap(va);
1804 		kernel_segmap[i] = sme;
1805 	}
1806 	pmap->pm_segmap = kernel_segmap;
1807 }
1808 
1809 
1810 /****************************************************************
1811  * PMAP interface functions.
1812  */
1813 
1814 /*
1815  * Support functions for vm_page_bootstrap().
1816  */
1817 
1818 /*
1819  * How much virtual space does this kernel have?
1820  * (After mapping kernel text, data, etc.)
1821  */
1822 void
1823 pmap_virtual_space(v_start, v_end)
1824 	vaddr_t *v_start;
1825 	vaddr_t *v_end;
1826 {
1827 	*v_start = virtual_avail;
1828 	*v_end   = virtual_end;
1829 }
1830 
1831 /* Provide memory to the VM system. */
1832 static void
1833 pmap_page_upload()
1834 {
1835 	int a, b, c, d;
1836 
1837 	if (hole_size) {
1838 		/*
1839 		 * Supply the memory in two segments so the
1840 		 * reserved memory (3/50 video ram at 1MB)
1841 		 * can be carved from the front of the 2nd.
1842 		 */
1843 		a = atop(avail_start);
1844 		b = atop(hole_start);
1845 		uvm_page_physload(a, b, a, b, VM_FREELIST_DEFAULT);
1846 		c = atop(hole_start + hole_size);
1847 		d = atop(avail_end);
1848 		uvm_page_physload(b, d, c, d, VM_FREELIST_DEFAULT);
1849 	} else {
1850 		a = atop(avail_start);
1851 		d = atop(avail_end);
1852 		uvm_page_physload(a, d, a, d, VM_FREELIST_DEFAULT);
1853 	}
1854 }
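
/*
 * Example: on a 3/50 the first call above loads the range below
 * the 1MB frame buffer, and the second loads [hole_start ..
 * avail_end) but marks only [hole_start + hole_size .. avail_end)
 * as free, so the video RAM is never handed out by UVM.
 */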
1855 
1856 /*
1857  *	Initialize the pmap module.
1858  *	Called by vm_init, to initialize any structures that the pmap
1859  *	system needs to map virtual memory.
1860  */
1861 void
1862 pmap_init()
1863 {
1864 	pv_init();
1865 
1866 	/* Initialize the pmap pool. */
1867 	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
1868 	    &pool_allocator_nointr);
1869 }
1870 
1871 /*
1872  * Map a range of kernel virtual address space.
1873  * This might be used for device mappings, or to
1874  * record the mapping for kernel text/data/bss.
1875  * Return VA following the mapped range.
1876  */
1877 vaddr_t
1878 pmap_map(va, pa, endpa, prot)
1879 	vaddr_t	va;
1880 	paddr_t	pa;
1881 	paddr_t	endpa;
1882 	int		prot;
1883 {
1884 	int sz;
1885 
1886 	sz = endpa - pa;
1887 	do {
1888 		pmap_enter(kernel_pmap, va, pa, prot, 0);
1889 		va += NBPG;
1890 		pa += NBPG;
1891 		sz -= NBPG;
1892 	} while (sz > 0);
1893 	pmap_update(kernel_pmap);
1894 	return(va);
1895 }
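
/*
 * For example, a device mapping might be entered as something
 * like:
 *	pmap_map(va, pa | PMAP_OBIO | PMAP_NC, pa + size,
 *	         VM_PROT_READ | VM_PROT_WRITE);
 * where the low "type" bits of the pseudo PA select the bus
 * space and cacheability, which pmap_enter() below folds into
 * each PTE.
 */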
1896 
1897 void
1898 pmap_user_init(pmap)
1899 	pmap_t pmap;
1900 {
1901 	int i;
1902 	pmap->pm_segmap = malloc(sizeof(char)*NUSEG, M_VMPMAP, M_WAITOK);
1903 	for (i = 0; i < NUSEG; i++) {
1904 		pmap->pm_segmap[i] = SEGINV;
1905 	}
1906 }
1907 
1908 /*
1909  *	Create and return a physical map.
1910  *
1911  *	The returned map is an actual physical map
1912  *	and may be referenced by the hardware.  (This
1913  *	pmap_create() takes no "size" argument, so
1914  *	software-only maps are never created here.)
1919  */
1920 pmap_t
1921 pmap_create()
1922 {
1923 	pmap_t pmap;
1924 
1925 	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
1926 	pmap_pinit(pmap);
1927 	return pmap;
1928 }
1929 
1930 /*
1931  * Release any resources held by the given physical map.
1932  * Called when a pmap initialized by pmap_pinit is being released.
1933  * Should only be called if the map contains no valid mappings.
1934  */
1935 void
1936 pmap_release(pmap)
1937 	struct pmap *pmap;
1938 {
1939 	int s;
1940 
1941 	s = splvm();
1942 
1943 	if (pmap == kernel_pmap)
1944 		panic("pmap_release: kernel_pmap!");
1945 
1946 	if (has_context(pmap)) {
1947 #ifdef	PMAP_DEBUG
1948 		if (pmap_debug & PMD_CONTEXT)
1949 			printf("pmap_release(%p): free ctx %d\n",
1950 				   pmap, pmap->pm_ctxnum);
1951 #endif
1952 		context_free(pmap);
1953 	}
1954 	free(pmap->pm_segmap, M_VMPMAP);
1955 	pmap->pm_segmap = NULL;
1956 
1957 	splx(s);
1958 }
1959 
1960 
1961 /*
1962  *	Retire the given physical map from service.
1963  *	Should only be called if the map contains
1964  *	no valid mappings.
1965  */
1966 void
1967 pmap_destroy(pmap)
1968 	pmap_t pmap;
1969 {
1970 	int count;
1971 
1972 #ifdef PMAP_DEBUG
1973 	if (pmap_debug & PMD_CREATE)
1974 		printf("pmap_destroy(%p)\n", pmap);
1975 #endif
1976 	if (pmap == kernel_pmap)
1977 		panic("pmap_destroy: kernel_pmap!");
1978 	pmap_lock(pmap);
1979 	count = pmap_del_ref(pmap);
1980 	pmap_unlock(pmap);
1981 	if (count == 0) {
1982 		pmap_release(pmap);
1983 		pool_put(&pmap_pmap_pool, pmap);
1984 	}
1985 }
1986 
1987 /*
1988  *	Add a reference to the specified pmap.
1989  */
1990 void
1991 pmap_reference(pmap)
1992 	pmap_t	pmap;
1993 {
1994 	pmap_lock(pmap);
1995 	pmap_add_ref(pmap);
1996 	pmap_unlock(pmap);
1997 }
1998 
1999 
2000 /*
2001  *	Insert the given physical page (p) at
2002  *	the specified virtual address (v) in the
2003  *	target physical map with the protection requested.
2004  *
2005  *	The physical address is page aligned, but may have some
2006  *	low bits set indicating an OBIO or VME bus page, or just
2007  *	that the non-cache bit should be set (i.e PMAP_NC).
2008  *	that the non-cache bit should be set (i.e. PMAP_NC).
2009  *	If specified, the page will be wired down, meaning
2010  *	that the related pte can not be reclaimed.
2011  *
2012  *	NB:  This is the only routine which MAY NOT lazy-evaluate
2013  *	or lose information.  That is, this routine must actually
2014  *	insert this page into the given map NOW.
2015  */
2016 int
2017 pmap_enter(pmap, va, pa, prot, flags)
2018 	pmap_t pmap;
2019 	vaddr_t va;
2020 	paddr_t pa;
2021 	vm_prot_t prot;
2022 	int flags;
2023 {
2024 	int new_pte, s;
2025 	boolean_t wired = (flags & PMAP_WIRED) != 0;
2026 
2027 #ifdef	PMAP_DEBUG
2028 	if ((pmap_debug & PMD_ENTER) ||
2029 		(va == pmap_db_watchva))
2030 		printf("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n",
2031 			   pmap, va, pa, prot, wired);
2032 #endif
2033 
2034 	/* Get page-type bits from low part of the PA... */
2035 	new_pte = (pa & PMAP_SPEC) << PG_MOD_SHIFT;
2036 
2037 	/* ...now the valid and writable bits... */
2038 	new_pte |= PG_VALID;
2039 	if (prot & VM_PROT_WRITE)
2040 		new_pte |= PG_WRITE;
2041 
2042 	/* ...and finally the page-frame number. */
2043 	new_pte |= PA_PGNUM(pa);
2044 
2045 	/*
2046 	 * Treatment varies significantly:
2047 	 *  kernel PTEs are in all contexts, and are always in the MMU;
2048 	 *  user PTEs are not necessarily in the MMU, and the pmap
2049 	 *  itself may not have a hardware context at all.
2050 	 */
2052 	s = splvm();
2053 	if (pmap == kernel_pmap) {
2054 		new_pte |= PG_SYSTEM;
2055 		pmap_enter_kernel(va, new_pte, wired);
2056 	} else {
2057 		pmap_enter_user(pmap, va, new_pte, wired);
2058 	}
2059 	splx(s);
2060 	return 0;
2061 }
2062 
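/*
 * Enter a kernel mapping.  Kernel PMEGs are entered in the
 * segmap of every context, so the new PTE is visible no matter
 * which context is currently loaded.
 */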
2063 static void
2064 pmap_enter_kernel(pgva, new_pte, wired)
2065 	vaddr_t pgva;
2066 	int new_pte;
2067 	boolean_t wired;
2068 {
2069 	pmap_t pmap = kernel_pmap;
2070 	pmeg_t pmegp;
2071 	int do_pv, old_pte, sme;
2072 	vaddr_t segva;
2073 
2074 	/*
2075 	 * Keep in hardware only, since it's mapped into all contexts anyway;
2076 	 * need to handle possibly allocating additional PMEGs;
2077 	 * need to make sure they can't be stolen from the kernel;
2078 	 * map any new PMEGs into all contexts, make sure rest of PMEG is null;
2079 	 * deal with pv_stuff; possibly caching problems;
2080 	 * must also deal with changes too.
2081 	 */
2082 
2083 	/*
2084 	 * In detail:
2085 	 *
2086 	 * (a) lock pmap
2087 	 * (b) Is the VA in an already-mapped segment?  If so,
2088 	 *	 check whether that VA is already "valid".  If it is, the
2089 	 *	 action is a change to an existing PTE.
2090 	 * (c) If the segment is not mapped, allocate a PMEG.
2091 	 * (d) If adding a PTE or changing the physaddr of an existing one,
2092 	 *		update the pv lists; for a change, possibly pmap_remove().
2093 	 * (e) Change/add the PTE.
2094 	 */
2095 
2096 #ifdef	DIAGNOSTIC
2097 	if ((pgva < virtual_avail) || (pgva >= DVMA_MAP_END))
2098 		panic("pmap_enter_kernel: bad va=0x%lx", pgva);
2099 	if ((new_pte & (PG_VALID | PG_SYSTEM)) != (PG_VALID | PG_SYSTEM))
2100 		panic("pmap_enter_kernel: bad pte");
2101 #endif
2102 
2103 	if (pgva >= DVMA_MAP_BASE) {
2104 		/* This is DVMA space.  Always want it non-cached. */
2105 		new_pte |= PG_NC;
2106 	}
2107 
2108 	segva = m68k_trunc_seg(pgva);
2109 	do_pv = TRUE;
2110 
2111 	/* Do we have a PMEG? */
2112 	sme = get_segmap(segva);
2113 	if (sme != SEGINV) {
2114 		/* Found a PMEG in the segmap.  Cool. */
2115 		pmegp = pmeg_p(sme);
2116 #ifdef	DIAGNOSTIC
2117 		/* Make sure it is the right PMEG. */
2118 		if (sme != pmap->pm_segmap[VA_SEGNUM(segva)])
2119 			panic("pmap_enter_kernel: wrong sme at VA=0x%lx", segva);
2120 		/* Make sure it is ours. */
2121 		if (pmegp->pmeg_owner != pmap)
2122 			panic("pmap_enter_kernel: MMU has bad pmeg 0x%x", sme);
2123 #endif
2124 	} else {
2125 		/* No PMEG in the segmap.  Have to allocate one. */
2126 		pmegp = pmeg_allocate(pmap, segva);
2127 		sme = pmegp->pmeg_index;
2128 		pmap->pm_segmap[VA_SEGNUM(segva)] = sme;
2129 		set_segmap_allctx(segva, sme);
2130 #ifdef	PMAP_DEBUG
2131 		pmeg_verify_empty(segva);
2132 		if (pmap_debug & PMD_SEGMAP) {
2133 			printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x (ek)\n",
2134 				   pmap, segva, sme);
2135 		}
2136 #endif
2137 		/* There are no existing mappings to deal with. */
2138 		old_pte = 0;
2139 		goto add_pte;
2140 	}
2141 
2142 	/*
2143 	 * We have a PMEG.  Is the VA already mapped to somewhere?
2144 	 *	(a) if so, is it same pa? (really a protection change)
2145 	 *	(b) if not same pa, then we have to unlink from old pa
2146 	 */
2147 	old_pte = get_pte(pgva);
2148 	if ((old_pte & PG_VALID) == 0)
2149 		goto add_pte;
2150 
2151 	/* Have valid translation.  Flush cache before changing it. */
2152 #ifdef	HAVECACHE
2153 	if (cache_size) {
2154 		cache_flush_page(pgva);
2155 		/* Get fresh mod/ref bits from write-back. */
2156 		old_pte = get_pte(pgva);
2157 	}
2158 #endif
2159 
2160 	/* XXX - removing valid page here, way lame... -glass */
2161 	pmegp->pmeg_vpages--;
2162 
2163 	if (!IS_MAIN_MEM(old_pte)) {
2164 		/* Was not main memory, so no pv_entry for it. */
2165 		goto add_pte;
2166 	}
2167 
2168 	/* Old mapping was main memory.  Save mod/ref bits. */
2169 	save_modref_bits(old_pte);
2170 
2171 	/*
2172 	 * If not changing the type or pfnum then re-use pv_entry.
2173 	 * Note we get here only with old_pte having PGT_OBMEM.
2174 	 */
2175 	if ((old_pte & (PG_TYPE|PG_FRAME)) == (new_pte & (PG_TYPE|PG_FRAME))) {
2176 		do_pv = FALSE;		/* re-use pv_entry */
2177 		new_pte |= (old_pte & PG_NC);
2178 		goto add_pte;
2179 	}
2180 
2181 	/* OK, different type or PA, have to kill old pv_entry. */
2182 	pv_unlink(pmap, old_pte, pgva);
2183 
2184  add_pte:	/* can be destructive */
2185 	pmeg_set_wiring(pmegp, pgva, wired);
2186 
2187 	/* Anything but MAIN_MEM is mapped non-cached. */
2188 	if (!IS_MAIN_MEM(new_pte)) {
2189 		new_pte |= PG_NC;
2190 		do_pv = FALSE;
2191 	}
2192 	if (do_pv == TRUE) {
2193 		if (pv_link(pmap, new_pte, pgva) & PV_NC)
2194 			new_pte |= PG_NC;
2195 	}
2196 #ifdef	PMAP_DEBUG
2197 	if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
2198 		printf("pmap: set_pte pmap=%p va=0x%lx old=0x%x new=0x%x (ek)\n",
2199 			   pmap, pgva, old_pte, new_pte);
2200 	}
2201 #endif
2202 	/* cache flush done above */
2203 	set_pte(pgva, new_pte);
2204 	pmegp->pmeg_vpages++;
2205 }
2206 
2207 
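/*
 * Enter a user-space mapping.  The pmap must be the currently
 * loaded one (see the check below); a hardware context and a
 * PMEG are allocated here if the segment has neither.
 */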
2208 static void
2209 pmap_enter_user(pmap, pgva, new_pte, wired)
2210 	pmap_t pmap;
2211 	vaddr_t pgva;
2212 	int new_pte;
2213 	boolean_t wired;
2214 {
2215 	int do_pv, old_pte, sme;
2216 	vaddr_t segva;
2217 	pmeg_t pmegp;
2218 
2219 #ifdef	DIAGNOSTIC
2220 	if (pgva >= VM_MAXUSER_ADDRESS)
2221 		panic("pmap_enter_user: bad va=0x%lx", pgva);
2222 	if ((new_pte & (PG_VALID | PG_SYSTEM)) != PG_VALID)
2223 		panic("pmap_enter_user: bad pte");
2224 #endif
2225 #ifdef	PMAP_DEBUG
2226 	/*
2227 	 * Some user pages are wired here, and a later
2228 	 * call to pmap_unwire() will unwire them.
2229 	 * XXX - Need a separate list for wired user pmegs
2230 	 * so they can not be stolen from the active list.
2231 	 * XXX - Note: vm_fault.c assumes pmap_extract will
2232 	 * work on wired mappings, so must preserve them...
2233 	 * XXX: Maybe keep a list of wired PMEGs?
2234 	 */
2235 	if (wired && (pmap_debug & PMD_WIRING)) {
2236 		db_printf("pmap_enter_user: attempt to wire user page, ignored\n");
2237 		Debugger();
2238 	}
2239 #endif
2240 
2241 	/* Validate this assumption. */
2242 	if (pmap != current_pmap()) {
2243 #ifdef	PMAP_DEBUG
2244 		/* Apparently, this never happens. */
2245 		db_printf("pmap_enter_user: not curproc\n");
2246 		Debugger();
2247 #endif
2248 		/* Just throw it out (fault it in later). */
2249 		/* XXX: But must remember it if wired... */
2250 		return;
2251 	}
2252 
2253 	segva = m68k_trunc_seg(pgva);
2254 	do_pv = TRUE;
2255 
2256 	/*
2257 	 * If this pmap was sharing the "empty" context,
2258 	 * allocate a real context for its exclusive use.
2259 	 */
2260 	if (!has_context(pmap)) {
2261 		context_allocate(pmap);
2262 #ifdef PMAP_DEBUG
2263 		if (pmap_debug & PMD_CONTEXT)
2264 			printf("pmap_enter(%p) got context %d\n",
2265 				   pmap, pmap->pm_ctxnum);
2266 #endif
2267 		set_context(pmap->pm_ctxnum);
2268 	} else {
2269 #ifdef	PMAP_DEBUG
2270 		/* Make sure context is correct. */
2271 		if (pmap->pm_ctxnum != get_context()) {
2272 			db_printf("pmap_enter_user: wrong context\n");
2273 			Debugger();
2274 			/* XXX: OK to proceed? */
2275 			set_context(pmap->pm_ctxnum);
2276 		}
2277 #endif
2278 	}
2279 
2280 	/*
2281 	 * We have a context.  Do we have a PMEG?
2282 	 */
2283 	sme = get_segmap(segva);
2284 	if (sme != SEGINV) {
2285 		/* Found a PMEG in the segmap.  Cool. */
2286 		pmegp = pmeg_p(sme);
2287 #ifdef	DIAGNOSTIC
2288 		/* Make sure it is the right PMEG. */
2289 		if (sme != pmap->pm_segmap[VA_SEGNUM(segva)])
2290 			panic("pmap_enter_user: wrong sme at VA=0x%lx", segva);
2291 		/* Make sure it is ours. */
2292 		if (pmegp->pmeg_owner != pmap)
2293 			panic("pmap_enter_user: MMU has bad pmeg 0x%x", sme);
2294 #endif
2295 	} else {
2296 		/* Not in the segmap.  Try the S/W cache. */
2297 		pmegp = pmeg_cache(pmap, segva);
2298 		if (pmegp) {
2299 			/* Found PMEG in cache.  Just reload it. */
2300 			sme = pmegp->pmeg_index;
2301 			set_segmap(segva, sme);
2302 		} else {
2303 			/* PMEG not in cache, so allocate one. */
2304 			pmegp = pmeg_allocate(pmap, segva);
2305 			sme = pmegp->pmeg_index;
2306 			pmap->pm_segmap[VA_SEGNUM(segva)] = sme;
2307 			set_segmap(segva, sme);
2308 #ifdef	PMAP_DEBUG
2309 			pmeg_verify_empty(segva);
2310 #endif
2311 		}
2312 #ifdef	PMAP_DEBUG
2313 		if (pmap_debug & PMD_SEGMAP) {
2314 			printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x (eu)\n",
2315 				   pmap, segva, sme);
2316 		}
2317 #endif
2318 	}
2319 
2320 	/*
2321 	 * We have a PMEG.  Is the VA already mapped to somewhere?
2322 	 *	(a) if so, is it same pa? (really a protection change)
2323 	 *	(b) if not same pa, then we have to unlink from old pa
2324 	 */
2325 	old_pte = get_pte(pgva);
2326 	if ((old_pte & PG_VALID) == 0)
2327 		goto add_pte;
2328 
2329 	/* Have valid translation.  Flush cache before changing it. */
2330 #ifdef	HAVECACHE
2331 	if (cache_size) {
2332 		cache_flush_page(pgva);
2333 		/* Get fresh mod/ref bits from write-back. */
2334 		old_pte = get_pte(pgva);
2335 	}
2336 #endif
2337 
2338 	/* XXX - removing valid page here, way lame... -glass */
2339 	pmegp->pmeg_vpages--;
2340 
2341 	if (!IS_MAIN_MEM(old_pte)) {
2342 		/* Was not main memory, so no pv_entry for it. */
2343 		goto add_pte;
2344 	}
2345 
2346 	/* Old mapping was main memory.  Save mod/ref bits. */
2347 	save_modref_bits(old_pte);
2348 
2349 	/*
2350 	 * If not changing the type or pfnum then re-use pv_entry.
2351 	 * Note we get here only with old_pte having PGT_OBMEM.
2352 	 */
2353 	if ((old_pte & (PG_TYPE|PG_FRAME)) == (new_pte & (PG_TYPE|PG_FRAME))) {
2354 		do_pv = FALSE;		/* re-use pv_entry */
2355 		new_pte |= (old_pte & PG_NC);
2356 		goto add_pte;
2357 	}
2358 
2359 	/* OK, different type or PA, have to kill old pv_entry. */
2360 	pv_unlink(pmap, old_pte, pgva);
2361 
2362  add_pte:
2363 	/* XXX - Wiring changes on user pmaps? */
2364 	/* pmeg_set_wiring(pmegp, pgva, wired); */
2365 
2366 	/* Anything but MAIN_MEM is mapped non-cached. */
2367 	if (!IS_MAIN_MEM(new_pte)) {
2368 		new_pte |= PG_NC;
2369 		do_pv = FALSE;
2370 	}
2371 	if (do_pv == TRUE) {
2372 		if (pv_link(pmap, new_pte, pgva) & PV_NC)
2373 			new_pte |= PG_NC;
2374 	}
2375 #ifdef	PMAP_DEBUG
2376 	if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
2377 		printf("pmap: set_pte pmap=%p va=0x%lx old=0x%x new=0x%x (eu)\n",
2378 			   pmap, pgva, old_pte, new_pte);
2379 	}
2380 #endif
2381 	/* cache flush done above */
2382 	set_pte(pgva, new_pte);
2383 	pmegp->pmeg_vpages++;
2384 }
2385 
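/*
 * Enter an unmanaged, wired kernel mapping.  Unlike pmap_enter(),
 * no pv_entry is recorded for the page, so such mappings must be
 * removed with pmap_kremove().
 */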
2386 void
2387 pmap_kenter_pa(va, pa, prot)
2388 	vaddr_t va;
2389 	paddr_t pa;
2390 	vm_prot_t prot;
2391 {
2392 	int new_pte, s;
2393 	pmap_t pmap = kernel_pmap;
2394 	pmeg_t pmegp;
2395 	int sme;
2396 	vaddr_t segva;
2397 
2398 #ifdef	PMAP_DEBUG
2399 	if ((pmap_debug & PMD_ENTER) ||
2400 		(va == pmap_db_watchva))
2401 		printf("pmap_kenter_pa(0x%lx, 0x%lx, 0x%x)\n",
2402 			   va, pa, prot);
2403 #endif
2404 
2405 	/* Get page-type bits from low part of the PA... */
2406 	new_pte = (pa & PMAP_SPEC) << PG_MOD_SHIFT;
2407 
2408 	/* ...now the valid and writable bits... */
2409 	new_pte |= PG_SYSTEM|PG_VALID;
2410 	if (prot & VM_PROT_WRITE)
2411 		new_pte |= PG_WRITE;
2412 
2413 	/* ...and finally the page-frame number. */
2414 	new_pte |= PA_PGNUM(pa);
2415 
2416 	/*
2417 	 * Keep in hardware only, since it's mapped into all contexts anyway;
2418 	 * need to handle possibly allocating additional PMEGs;
2419 	 * need to make sure they can't be stolen from the kernel;
2420 	 * map any new PMEGs into all contexts, make sure rest of PMEG is null;
2421 	 * must also deal with changes too.
2422 	 */
2423 
2424 	/*
2425 	 * In detail:
2426 	 *
2427 	 * (a) lock pmap
2428 	 * (b) Is the VA in an already-mapped segment?  If so,
2429 	 *	 check whether that VA is already "valid".  If it is, the
2430 	 *	 action is a change to an existing PTE.
2431 	 * (c) If the segment is not mapped, allocate a PMEG.
2432 	 * (d) Change/add the PTE.
2433 	 */
2434 
2435 #ifdef	DIAGNOSTIC
2436 	if ((va < virtual_avail) || (va >= DVMA_MAP_END))
2437 		panic("pmap_kenter_pa: bad va=0x%lx", va);
2438 #endif
2439 
2440 	if (va >= DVMA_MAP_BASE) {
2441 		/* This is DVMA space.  Always want it non-cached. */
2442 		new_pte |= PG_NC;
2443 	}
2444 
2445 	segva = m68k_trunc_seg(va);
2446 
2447 	s = splvm();
2448 
2449 	/* Do we have a PMEG? */
2450 	sme = get_segmap(segva);
2451 	if (sme != SEGINV) {
2452 		KASSERT((get_pte(va) & PG_VALID) == 0);
2453 
2454 		/* Found a PMEG in the segmap.  Cool. */
2455 		pmegp = pmeg_p(sme);
2456 #ifdef	DIAGNOSTIC
2457 		/* Make sure it is the right PMEG. */
2458 		if (sme != pmap->pm_segmap[VA_SEGNUM(segva)])
2459 			panic("pmap_kenter_pa: wrong sme at VA=0x%lx", segva);
2460 		/* Make sure it is ours. */
2461 		if (pmegp->pmeg_owner != pmap)
2462 			panic("pmap_kenter_pa: MMU has bad pmeg 0x%x", sme);
2463 #endif
2464 	} else {
2465 
2466 		/* No PMEG in the segmap.  Have to allocate one. */
2467 		pmegp = pmeg_allocate(pmap, segva);
2468 		sme = pmegp->pmeg_index;
2469 		pmap->pm_segmap[VA_SEGNUM(segva)] = sme;
2470 		set_segmap_allctx(segva, sme);
2471 #ifdef	PMAP_DEBUG
2472 		pmeg_verify_empty(segva);
2473 		if (pmap_debug & PMD_SEGMAP) {
2474 			printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x (ek)\n",
2475 				   pmap, segva, sme);
2476 		}
2477 #endif
2478 	}
2479 
2480 	pmeg_set_wiring(pmegp, va, TRUE);
2481 
2482 	/* Anything but MAIN_MEM is mapped non-cached. */
2483 	if (!IS_MAIN_MEM(new_pte)) {
2484 		new_pte |= PG_NC;
2485 	}
2486 #ifdef	PMAP_DEBUG
2487 	if ((pmap_debug & PMD_SETPTE) || (va == pmap_db_watchva)) {
2488 		printf("pmap: set_pte pmap=%p va=0x%lx old=0x%x new=0x%x (ek)\n",
2489 			   pmap, va, old_pte, new_pte);
2490 	}
2491 #endif
2492 	/* cache flush done above */
2493 	set_pte(va, new_pte);
2494 	pmegp->pmeg_vpages++;
2495 	splx(s);
2496 }
2497 
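/*
 * Remove mappings entered with pmap_kenter_pa() in the given
 * range.  PMEGs left with no valid pages are returned to the
 * free list.
 */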
2498 void
2499 pmap_kremove(va, len)
2500 	vaddr_t va;
2501 	vsize_t len;
2502 {
2503 	pmap_t pmap = kernel_pmap;
2504 	vaddr_t eva, neva, pgva, segva;
2505 	int pte, sme, segnum;
2506 	pmeg_t pmegp;
2507 #ifdef	HAVECACHE
2508 	int flush_by_page = 0;
2509 #endif
2510 	int s;
2511 
2512 	s = splvm();
2513 	segnum = VA_SEGNUM(va);
2514 	for (eva = va + len; va < eva; va = neva, segnum++) {
2515 		neva = m68k_trunc_seg(va) + NBSG;
2516 		if (neva > eva) {
2517 			neva = eva;
2518 		}
2519 		if (pmap->pm_segmap[segnum] == SEGINV) {
2520 			continue;
2521 		}
2522 
2523 		segva = m68k_trunc_seg(va);
2524 		sme = get_segmap(segva);
2525 		pmegp = pmeg_p(sme);
2526 
2527 #ifdef	HAVECACHE
2528 		if (cache_size) {
2529 
2530 			/*
2531 			 * If the range to be removed is larger than the cache,
2532 			 * it will be cheaper to flush this segment entirely.
2533 			 */
2534 
2535 			if (cache_size < (eva - va)) {
2536 				/* cheaper to flush whole segment */
2537 				cache_flush_segment(segva);
2538 			} else {
2539 				flush_by_page = 1;
2540 			}
2541 		}
2542 #endif
2543 
2544 		/* Invalidate the PTEs in the given range. */
2545 		for (pgva = va; pgva < neva; pgva += NBPG) {
2546 			pte = get_pte(pgva);
2547 			if (pte & PG_VALID) {
2548 #ifdef	HAVECACHE
2549 				if (flush_by_page) {
2550 					cache_flush_page(pgva);
2551 					/* Get fresh mod/ref bits from write-back. */
2552 					pte = get_pte(pgva);
2553 				}
2554 #endif
2555 #ifdef	PMAP_DEBUG
2556 				if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
2557 					printf("pmap: set_pte pmap=%p va=0x%lx"
2558 					   " old=0x%x new=0x%x (rrmmu)\n",
2559 					   pmap, pgva, pte, PG_INVAL);
2560 				}
2561 #endif
2562 				set_pte(pgva, PG_INVAL);
2563 				KASSERT(pmegp->pmeg_vpages > 0);
2564 				pmegp->pmeg_vpages--;
2565 			}
2566 		}
2567 		KASSERT(pmegp->pmeg_vpages >= 0);
2568 		if (pmegp->pmeg_vpages == 0) {
2569 			/* We are done with this pmeg. */
2570 #ifdef	PMAP_DEBUG
2571 			if (is_pmeg_wired(pmegp)) {
2572 				if (pmap_debug & PMD_WIRING) {
2573 					db_printf("pmap: removing wired pmeg: %p\n", pmegp);
2574 					Debugger();
2575 				}
2576 			}
2577 			if (pmap_debug & PMD_SEGMAP) {
2578 				printf("pmap: set_segmap ctx=%d v=0x%lx old=0x%x new=ff (rm)\n",
2579 					pmap->pm_ctxnum, segva, pmegp->pmeg_index);
2580 			}
2581 			pmeg_verify_empty(segva);
2582 #endif
2583 
2584 			/* Remove it from the MMU. */
2585 			set_segmap_allctx(segva, SEGINV);
2586 			pmap->pm_segmap[VA_SEGNUM(segva)] = SEGINV;
2587 
2588 			/* Now, put it on the free list. */
2589 			pmeg_free(pmegp);
2590 		}
2591 	}
2592 	splx(s);
2593 }
2594 
2595 
2596 /*
2597  * The trap handler calls this so we can try to resolve
2598  * user-level faults by reloading a PMEG.
2599  * If that does not produce a valid mapping,
2600  * call uvm_fault() as usual.
2601  *
2602  * XXX: Merge this with the next function?
2603  */
2604 int
2605 _pmap_fault(map, va, ftype)
2606 	struct vm_map *map;
2607 	vaddr_t va;
2608 	vm_prot_t ftype;
2609 {
2610 	pmap_t pmap;
2611 	int rv;
2612 
2613 	pmap = vm_map_pmap(map);
2614 	if (map == kernel_map) {
2615 		/* Do not allow faults below the "managed" space. */
2616 		if (va < virtual_avail) {
2617 			/*
2618 			 * Most pages below virtual_avail are read-only,
2619 			 * so I will assume it is a protection failure.
2620 			 */
2621 			return EACCES;
2622 		}
2623 	} else {
2624 		/* User map.  Try reload shortcut. */
2625 		if (pmap_fault_reload(pmap, va, ftype))
2626 			return 0;
2627 	}
2628 	rv = uvm_fault(map, va, 0, ftype);
2629 
2630 #ifdef	PMAP_DEBUG
2631 	if (pmap_debug & PMD_FAULT) {
2632 		printf("pmap_fault(%p, 0x%lx, 0x%x) -> 0x%x\n",
2633 			   map, va, ftype, rv);
2634 	}
2635 #endif
2636 
2637 	return (rv);
2638 }
2639 
2640 /*
2641  * This is a shortcut used by the trap handler to
2642  * reload PMEGs into a user segmap without calling
2643  * the actual VM fault handler.  Returns TRUE if:
2644  *	the PMEG was reloaded, and
2645  *	it has a valid PTE at va.
2646  * Otherwise return zero and let VM code handle it.
2647  */
2648 int
2649 pmap_fault_reload(pmap, pgva, ftype)
2650 	pmap_t pmap;
2651 	vaddr_t pgva;
2652 	vm_prot_t ftype;
2653 {
2654 	int rv, s, pte, chkpte, sme;
2655 	vaddr_t segva;
2656 	pmeg_t pmegp;
2657 
2658 	if (pgva >= VM_MAXUSER_ADDRESS)
2659 		return (0);
2660 	if (pmap->pm_segmap == NULL) {
2661 #ifdef	PMAP_DEBUG
2662 		db_printf("pmap_fault_reload: null segmap\n");
2663 		Debugger();
2664 #endif
2665 		return (0);
2666 	}
2667 
2668 	/* Short-cut using the S/W segmap. */
2669 	if (pmap->pm_segmap[VA_SEGNUM(pgva)] == SEGINV)
2670 		return (0);
2671 
2672 	segva = m68k_trunc_seg(pgva);
2673 	chkpte = PG_VALID;
2674 	if (ftype & VM_PROT_WRITE)
2675 		chkpte |= PG_WRITE;
2676 	rv = 0;
2677 
2678 	s = splvm();
2679 
2680 	/*
2681 	 * Given that we faulted on a user-space address, we will
2682 	 * probably need a context.  Get a context now so we can
2683 	 * try to resolve the fault with a segmap reload.
2684 	 */
2685 	if (!has_context(pmap)) {
2686 		context_allocate(pmap);
2687 #ifdef PMAP_DEBUG
2688 		if (pmap_debug & PMD_CONTEXT)
2689 			printf("pmap_fault(%p) got context %d\n",
2690 				   pmap, pmap->pm_ctxnum);
2691 #endif
2692 		set_context(pmap->pm_ctxnum);
2693 	} else {
2694 #ifdef	PMAP_DEBUG
2695 		/* Make sure context is correct. */
2696 		if (pmap->pm_ctxnum != get_context()) {
2697 			db_printf("pmap_fault_reload: wrong context\n");
2698 			Debugger();
2699 			/* XXX: OK to proceed? */
2700 			set_context(pmap->pm_ctxnum);
2701 		}
2702 #endif
2703 	}
2704 
2705 	sme = get_segmap(segva);
2706 	if (sme == SEGINV) {
2707 		/* See if there is something to reload. */
2708 		pmegp = pmeg_cache(pmap, segva);
2709 		if (pmegp) {
2710 			/* Found one!  OK, reload it. */
2711 			pmap_stats.ps_pmeg_faultin++;
2712 			sme = pmegp->pmeg_index;
2713 			set_segmap(segva, sme);
2714 			pte = get_pte(pgva);
2715 			if (pte & chkpte)
2716 				rv = 1;
2717 		}
2718 	}
2719 
2720 	splx(s);
2721 	return (rv);
2722 }
2723 
2724 
2725 /*
2726  * Clear the modify bit for the given physical page.
2727  */
2728 boolean_t
2729 pmap_clear_modify(pg)
2730 	struct vm_page *pg;
2731 {
2732 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
2733 	pv_entry_t *head;
2734 	u_char *pv_flags;
2735 	int s;
2736 	boolean_t rv;
2737 
2738 	pv_flags = pa_to_pvflags(pa);
2739 	head     = pa_to_pvhead(pa);
2740 
2741 	s = splvm();
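	/* Gather mod/ref state from all mappings of the page, then clear MOD. */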
2742 	*pv_flags |= pv_syncflags(*head);
2743 	rv = *pv_flags & PV_MOD;
2744 	*pv_flags &= ~PV_MOD;
2745 	splx(s);
2746 	return rv;
2747 }
2748 
2749 /*
2750  * Tell whether the given physical page has been modified.
2751  */
2752 boolean_t
2753 pmap_is_modified(pg)
2754 	struct vm_page *pg;
2755 {
2756 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
2757 	pv_entry_t *head;
2758 	u_char *pv_flags;
2759 	int s;
2760 	boolean_t rv;
2761 
2762 	pv_flags = pa_to_pvflags(pa);
2763 	head     = pa_to_pvhead(pa);
2764 
2765 	s = splvm();
2766 	if ((*pv_flags & PV_MOD) == 0)
2767 		*pv_flags |= pv_syncflags(*head);
2768 	rv = (*pv_flags & PV_MOD);
2769 	splx(s);
2770 	return (rv);
2771 }
2772 
2773 /*
2774  * Clear the reference bit for the given physical page.
2775  * It's OK to just remove mappings if that's easier.
2776  */
2777 boolean_t
2778 pmap_clear_reference(pg)
2779 	struct vm_page *pg;
2780 {
2781 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
2782 	pv_entry_t *head;
2783 	u_char *pv_flags;
2784 	int s;
2785 	boolean_t rv;
2786 
2787 	pv_flags = pa_to_pvflags(pa);
2788 	head     = pa_to_pvhead(pa);
2789 
2790 	s = splvm();
2791 	*pv_flags |= pv_syncflags(*head);
2792 	rv = *pv_flags & PV_REF;
2793 	*pv_flags &= ~PV_REF;
2794 	splx(s);
2795 	return rv;
2796 }
2797 
2798 /*
2799  * Tell whether the given physical page has been referenced.
2800  * It's OK to just return FALSE if page is not mapped.
2801  */
2802 boolean_t
2803 pmap_is_referenced(pg)
2804 	struct vm_page *pg;
2805 {
2806 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
2807 	pv_entry_t *head;
2808 	u_char *pv_flags;
2809 	int s;
2810 	boolean_t rv;
2811 
2812 	pv_flags = pa_to_pvflags(pa);
2813 	head     = pa_to_pvhead(pa);
2814 
2815 	s = splvm();
2816 	if ((*pv_flags & PV_REF) == 0)
2817 		*pv_flags |= pv_syncflags(*head);
2818 	rv = (*pv_flags & PV_REF);
2819 	splx(s);
2820 	return (rv);
2821 }
2822 
2823 
2824 /*
2825  * This is called by locore.s:cpu_switch() when it is
2826  * switching to a new process.  Load new translations.
2827  * Note: done in-line by locore.s unless PMAP_DEBUG
2828  *
2829  * Note that we do NOT allocate a context here, but
2830  * share the "kernel only" context until we really
2831  * need our own context for user-space mappings in
2832  * pmap_enter_user().
2833  */
2834 void
2835 _pmap_switch(pmap)
2836 	pmap_t pmap;
2837 {
2838 	set_context(pmap->pm_ctxnum);
2839 	ICIA();
2840 }
2841 
2842 /*
2843  * Exported version of pmap_activate().  This is called from the
2844  * machine-independent VM code when a process is given a new pmap.
2845  * If (p == curproc) do like cpu_switch would do; otherwise just
2846  * take this as notification that the process has a new pmap.
2847  */
2848 void
2849 pmap_activate(p)
2850 	struct proc *p;
2851 {
2852 	pmap_t pmap = p->p_vmspace->vm_map.pmap;
2853 
2854 	if (p == curproc) {
2855 		_pmap_switch(pmap);
2856 	}
2857 }
2858 
2859 /*
2860  * Deactivate the address space of the specified process.
2861  */
2862 void
2863 pmap_deactivate(p)
2864 	struct proc *p;
2865 {
2866 	/* Nothing to do. */
2867 }
2868 
2869 /*
2870  *	Routine:	pmap_unwire
2871  *	Function:	Clear the wired attribute for a map/virtual-address
2872  *			pair.
2873  *	In/out conditions:
2874  *			The mapping must already exist in the pmap.
2875  */
2876 void
2877 pmap_unwire(pmap, va)
2878 	pmap_t	pmap;
2879 	vaddr_t	va;
2880 {
2881 	int s, sme;
2882 	int wiremask, ptenum;
2883 	pmeg_t pmegp;
2884 
2885 #ifdef PMAP_DEBUG
2886 	if (pmap_debug & PMD_WIRING)
2887 		printf("pmap_unwire(pmap=%p, va=0x%lx)\n",
2888 			   pmap, va);
2889 #endif
2890 	/*
2891 	 * We are asked to unwire pages that were wired when
2892 	 * pmap_enter() was called and we ignored wiring.
2893 	 * (VM code appears to wire a stack page during fork.)
2894 	 */
2895 	if (pmap != kernel_pmap) {
2896 #ifdef PMAP_DEBUG
2897 		if (pmap_debug & PMD_WIRING) {
2898 			db_printf("  (user pmap -- ignored)\n");
2899 			Debugger();
2900 		}
2901 #endif
2902 		return;
2903 	}
2904 
2905 	ptenum = VA_PTE_NUM(va);
2906 	wiremask = 1 << ptenum;
2907 
2908 	s = splvm();
2909 	sme = get_segmap(va);
2910 	pmegp = pmeg_p(sme);
2911 	pmegp->pmeg_wired &= ~wiremask;
2912 	splx(s);
2913 }
2914 
2915 /*
2916  *	Copy the range specified by src_addr/len
2917  *	from the source map to the range dst_addr/len
2918  *	in the destination map.
2919  *
2920  *	This routine is only advisory and need not do anything.
2921  */
2922 void
2923 pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
2924 	pmap_t		dst_pmap;
2925 	pmap_t		src_pmap;
2926 	vaddr_t		dst_addr;
2927 	vsize_t		len;
2928 	vaddr_t		src_addr;
2929 {
2930 }
2931 
2932 /*
2933  *	Routine:	pmap_extract
2934  *	Function:
2935  *		Extract the physical page address associated
2936  *		with the given map/virtual_address pair.
2937  *	Returns zero if VA not valid.
2938  */
2939 boolean_t
2940 pmap_extract(pmap, va, pap)
2941 	pmap_t	pmap;
2942 	vaddr_t va;
2943 	paddr_t *pap;
2944 {
2945 	int s, sme, segnum, ptenum, pte;
2946 	paddr_t pa;
2947 
2948 	pte = 0;
2949 	s = splvm();
2950 	if (pmap == kernel_pmap) {
2951 		sme = get_segmap(va);
2952 		if (sme != SEGINV)
2953 			pte = get_pte(va);
2954 	} else {
2955 		/* This is rare, so do it the easy way. */
2956 		segnum = VA_SEGNUM(va);
2957 		sme = pmap->pm_segmap[segnum];
2958 		if (sme != SEGINV) {
2959 			ptenum = VA_PTE_NUM(va);
2960 			pte = get_pte_pmeg(sme, ptenum);
2961 		}
2962 	}
2963 	splx(s);
2964 
2965 	if ((pte & PG_VALID) == 0) {
2966 #ifdef PMAP_DEBUG
2967 		db_printf("pmap_extract: invalid va=0x%lx\n", va);
2968 		Debugger();
2969 #endif
2970 		return (FALSE);
2971 	}
2972 	pa = PG_PA(pte);
2973 #ifdef	DIAGNOSTIC
2974 	if (pte & PG_TYPE) {
2975 		panic("pmap_extract: not main mem, va=0x%lx", va);
2976 	}
2977 #endif
2978 	if (pap != NULL)
2979 		*pap = pa;
2980 	return (TRUE);
2981 }
2982 
2983 
2984 /*
2985  *	  pmap_page_protect:
2986  *
2987  *	  Lower the permission for all mappings to a given page.
2988  */
2989 void
2990 pmap_page_protect(pg, prot)
2991 	struct vm_page *pg;
2992 	vm_prot_t	   prot;
2993 {
2994 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
2995 	int s;
2996 
2997 	s = splvm();
2998 #ifdef PMAP_DEBUG
2999 	if (pmap_debug & PMD_PROTECT)
3000 		printf("pmap_page_protect(0x%lx, 0x%lx)\n", pa, prot);
3001 #endif
3002 	switch (prot) {
3003 	case VM_PROT_ALL:
3004 		break;
3005 	case VM_PROT_READ:
3006 	case VM_PROT_READ|VM_PROT_EXECUTE:
3007 		pv_changepte(pa, 0, PG_WRITE);
3008 		break;
3009 	default:
3010 		/* remove mapping for all pmaps that have it */
3011 		pv_remove_all(pa);
3012 		break;
3013 	}
3014 	splx(s);
3015 }
3016 
3017 /*
3018  * Initialize a preallocated and zeroed pmap structure,
3019  * such as one in a vmspace structure.
3020  */
3021 void
3022 pmap_pinit(pmap)
3023 	pmap_t pmap;
3024 {
3025 	pmap_common_init(pmap);
3026 	pmap_user_init(pmap);
3027 }
3028 
3029 /*
3030  *	Reduce the permissions on the specified
3031  *	range of this map as requested.
3032  *	(Make pages read-only.)
3033  */
3034 void
3035 pmap_protect(pmap, sva, eva, prot)
3036 	pmap_t pmap;
3037 	vaddr_t sva, eva;
3038 	vm_prot_t	prot;
3039 {
3040 	vaddr_t va, neva;
3041 	int segnum;
3042 
3043 	/* If leaving writable, nothing to do. */
3044 	if (prot & VM_PROT_WRITE)
3045 		return;
3046 
3047 	/* If removing all permissions, just unmap. */
3048 	if ((prot & VM_PROT_READ) == 0) {
3049 		pmap_remove(pmap, sva, eva);
3050 		return;
3051 	}
3052 
3053 #ifdef	PMAP_DEBUG
3054 	if ((pmap_debug & PMD_PROTECT) ||
3055 		((sva <= pmap_db_watchva && eva > pmap_db_watchva)))
3056 		printf("pmap_protect(%p, 0x%lx, 0x%lx)\n", pmap, sva, eva);
3057 #endif
3058 
3059 	KASSERT((pmap == kernel_pmap) ?
3060 			sva >= virtual_avail && eva < DVMA_MAP_END :
3061 			eva <= VM_MAXUSER_ADDRESS);
3062 	va = sva;
3063 	segnum = VA_SEGNUM(va);
3064 	while (va < eva) {
3065 		neva = m68k_trunc_seg(va) + NBSG;
3066 		if (neva > eva)
3067 			neva = eva;
3068 		if (pmap->pm_segmap[segnum] != SEGINV)
3069 			pmap_protect1(pmap, va, neva);
3070 		va = neva;
3071 		segnum++;
3072 	}
3073 }
3074 
3075 /*
3076  * Remove write permissions in given range.
3077  * (guaranteed to be within one segment)
3078  * similar to pmap_remove1()
3079  */
3080 void
3081 pmap_protect1(pmap, sva, eva)
3082 	pmap_t pmap;
3083 	vaddr_t sva, eva;
3084 {
3085 	int old_ctx, s, sme;
3086 	boolean_t in_ctx;
3087 
3088 	s = splvm();
3089 
3090 #ifdef	DIAGNOSTIC
3091 	if (m68k_trunc_seg(sva) != m68k_trunc_seg(eva-1))
3092 		panic("pmap_protect1: bad range!");
3093 #endif
3094 
3095 	if (pmap == kernel_pmap) {
3096 		sme = get_segmap(sva);
3097 		if (sme != SEGINV)
3098 			pmap_protect_mmu(pmap, sva, eva);
3099 		goto out;
3100 	}
3101 	/* It is a user pmap. */
3102 
3103 	/* There is a PMEG, but maybe not active. */
3104 	old_ctx = INVALID_CONTEXT;
3105 	in_ctx = FALSE;
3106 	if (has_context(pmap)) {
3107 		/* Temporary context change. */
3108 		old_ctx = get_context();
3109 		set_context(pmap->pm_ctxnum);
3110 		sme = get_segmap(sva);
3111 		if (sme != SEGINV)
3112 			in_ctx = TRUE;
3113 	}
3114 
3115 	if (in_ctx == TRUE)
3116 		pmap_protect_mmu(pmap, sva, eva);
3117 	else
3118 		pmap_protect_noctx(pmap, sva, eva);
3119 
3120 	if (old_ctx != INVALID_CONTEXT) {
3121 		/* Restore previous context. */
3122 		set_context(old_ctx);
3123 	}
3124 
3125 out:
3126 	splx(s);
3127 }
3128 
3129 /*
3130  * Remove write permissions, all in one PMEG,
3131  * where that PMEG is currently in the MMU.
3132  * The current context is already correct.
3133  */
3134 void
3135 pmap_protect_mmu(pmap, sva, eva)
3136 	pmap_t pmap;
3137 	vaddr_t sva, eva;
3138 {
3139 	pmeg_t pmegp;
3140 	vaddr_t pgva, segva;
3141 	int pte, sme;
3142 #ifdef	HAVECACHE
3143 	int flush_by_page = 0;
3144 #endif
3145 
3146 	CHECK_SPL();
3147 
3148 #ifdef	DIAGNOSTIC
3149 	if (pmap != kernel_pmap) {
3150 		if (pmap->pm_ctxnum != get_context())
3151 			panic("pmap_protect_mmu: wrong context");
3152 	}
3153 #endif
3154 
3155 	segva = m68k_trunc_seg(sva);
3156 	sme = get_segmap(segva);
3157 
3158 #ifdef	DIAGNOSTIC
3159 	/* Make sure it is valid and known. */
3160 	if (sme == SEGINV)
3161 		panic("pmap_protect_mmu: SEGINV");
3162 	if (pmap->pm_segmap && (pmap->pm_segmap[VA_SEGNUM(segva)] != sme))
3163 		panic("pmap_protect_mmu: incorrect sme, va=0x%lx", segva);
3164 #endif
3165 
3166 	pmegp = pmeg_p(sme);
3167 	/* have pmeg, will travel */
3168 
3169 #ifdef	DIAGNOSTIC
3170 	/* Make sure we own the pmeg, right va, etc. */
3171 	if ((pmegp->pmeg_va != segva) ||
3172 		(pmegp->pmeg_owner != pmap) ||
3173 		(pmegp->pmeg_version != pmap->pm_version))
3174 	{
3175 		panic("pmap_protect_mmu: bad pmeg=%p", pmegp);
3176 	}
3177 	if (pmegp->pmeg_vpages < 0)
3178 		panic("pmap_protect_mmu: npages corrupted");
3179 	if (pmegp->pmeg_vpages == 0)
3180 		panic("pmap_protect_mmu: no valid pages?");
3181 #endif
3182 
3183 #ifdef	HAVECACHE
3184 	if (cache_size) {
3185 		/*
3186 		 * If the range to be removed is larger than the cache,
3187 		 * it will be cheaper to flush this segment entirely.
3188 		 */
3189 		if (cache_size < (eva - sva)) {
3190 			/* cheaper to flush whole segment */
3191 			cache_flush_segment(segva);
3192 		} else {
3193 			flush_by_page = 1;
3194 		}
3195 	}
3196 #endif
3197 
3198 	/* Remove write permission in the given range. */
3199 	for (pgva = sva; pgva < eva; pgva += NBPG) {
3200 		pte = get_pte(pgva);
3201 		if (pte & PG_VALID) {
3202 #ifdef	HAVECACHE
3203 			if (flush_by_page) {
3204 				cache_flush_page(pgva);
3205 				/* Get fresh mod/ref bits from write-back. */
3206 				pte = get_pte(pgva);
3207 			}
3208 #endif
3209 			if (IS_MAIN_MEM(pte)) {
3210 				save_modref_bits(pte);
3211 			}
3212 			pte &= ~(PG_WRITE | PG_MODREF);
3213 			set_pte(pgva, pte);
3214 		}
3215 	}
3216 }
3217 
3218 /*
3219  * Remove write permissions, all in one PMEG,
3220  * where it is not currently in any context.
3221  */
3222 void
3223 pmap_protect_noctx(pmap, sva, eva)
3224 	pmap_t pmap;
3225 	vaddr_t sva, eva;
3226 {
3227 	int old_ctx, pte, sme, segnum;
3228 	vaddr_t pgva, segva;
3229 
3230 #ifdef	DIAGNOSTIC
3231 	/* Kernel always in a context (actually, in all contexts). */
3232 	if (pmap == kernel_pmap)
3233 		panic("pmap_protect_noctx: kernel_pmap");
3234 	if (pmap->pm_segmap == NULL)
3235 		panic("pmap_protect_noctx: null segmap");
3236 #endif
3237 
3238 	segva = m68k_trunc_seg(sva);
3239 	segnum = VA_SEGNUM(segva);
3240 	sme = pmap->pm_segmap[segnum];
3241 	if (sme == SEGINV)
3242 		return;
3243 
3244 	/*
3245 	 * Borrow the EMPTY_CONTEXT so we can access the PMEG
3246 	 * at its normal virtual address.
3247 	 */
3248 	old_ctx = get_context();
3249 	set_context(EMPTY_CONTEXT);
3250 	set_segmap(segva, sme);
3251 
3252 	/* Remove write permission in the given range. */
3253 	for (pgva = sva; pgva < eva; pgva += NBPG) {
3254 		pte = get_pte(pgva);
3255 		if (pte & PG_VALID) {
3256 			/* No cache flush needed. */
3257 			if (IS_MAIN_MEM(pte)) {
3258 				save_modref_bits(pte);
3259 			}
3260 			pte &= ~(PG_WRITE | PG_MODREF);
3261 			set_pte(pgva, pte);
3262 		}
3263 	}
3264 
3265 	/*
3266 	 * Make the EMPTY_CONTEXT really empty again, and
3267 	 * restore the previous context.
3268 	 */
3269 	set_segmap(segva, SEGINV);
3270 	set_context(old_ctx);
3271 }
3272 
3273 
3274 /*
3275  *	Remove the given range of addresses from the specified map.
3276  *
3277  *	It is assumed that the start and end are properly
3278  *	rounded to the page size.
3279  */
3280 void
3281 pmap_remove(pmap, sva, eva)
3282 	pmap_t pmap;
3283 	vaddr_t sva, eva;
3284 {
3285 	vaddr_t va, neva;
3286 	int segnum;
3287 
3288 #ifdef	PMAP_DEBUG
3289 	if ((pmap_debug & PMD_REMOVE) ||
3290 		((sva <= pmap_db_watchva && eva > pmap_db_watchva)))
3291 		printf("pmap_remove(%p, 0x%lx, 0x%lx)\n", pmap, sva, eva);
3292 #endif
3293 
3294 
3295 	KASSERT((pmap == kernel_pmap) ?
3296 			sva >= virtual_avail && eva < DVMA_MAP_END :
3297 			eva <= VM_MAXUSER_ADDRESS);
3298 	va = sva;
3299 	segnum = VA_SEGNUM(va);
3300 	while (va < eva) {
3301 		neva = m68k_trunc_seg(va) + NBSG;
3302 		if (neva > eva)
3303 			neva = eva;
3304 		if (pmap->pm_segmap[segnum] != SEGINV)
3305 			pmap_remove1(pmap, va, neva);
3306 		va = neva;
3307 		segnum++;
3308 	}
3309 }
3310 
3311 /*
3312  * Remove user mappings, all within one segment
3313  */
3314 void
3315 pmap_remove1(pmap, sva, eva)
3316 	pmap_t pmap;
3317 	vaddr_t sva, eva;
3318 {
3319 	int old_ctx, s, sme;
3320 	boolean_t in_ctx;
3321 
3322 	s = splvm();
3323 
3324 #ifdef	DIAGNOSTIC
3325 	if (m68k_trunc_seg(sva) != m68k_trunc_seg(eva-1))
3326 		panic("pmap_remove1: bad range!");
3327 #endif
3328 
3329 	if (pmap == kernel_pmap) {
3330 		sme = get_segmap(sva);
3331 		if (sme != SEGINV)
3332 			pmap_remove_mmu(pmap, sva, eva);
3333 		goto out;
3334 	}
3335 	/* It is a user pmap. */
3336 
3337 	/* There is a PMEG, but maybe not active. */
3338 	old_ctx = INVALID_CONTEXT;
3339 	in_ctx = FALSE;
3340 	if (has_context(pmap)) {
3341 		/* Temporary context change. */
3342 		old_ctx = get_context();
3343 		set_context(pmap->pm_ctxnum);
3344 		sme = get_segmap(sva);
3345 		if (sme != SEGINV)
3346 			in_ctx = TRUE;
3347 	}
3348 
3349 	if (in_ctx == TRUE)
3350 		pmap_remove_mmu(pmap, sva, eva);
3351 	else
3352 		pmap_remove_noctx(pmap, sva, eva);
3353 
3354 	if (old_ctx != INVALID_CONTEXT) {
3355 		/* Restore previous context. */
3356 		set_context(old_ctx);
3357 	}
3358 
3359 out:
3360 	splx(s);
3361 }
3362 
3363 /*
3364  * Remove some mappings, all in one PMEG,
3365  * where that PMEG is currently in the MMU.
3366  * The current context is already correct.
3367  * If no PTEs remain valid in the PMEG, free it.
3368  */
3369 void
3370 pmap_remove_mmu(pmap, sva, eva)
3371 	pmap_t pmap;
3372 	vaddr_t sva, eva;
3373 {
3374 	pmeg_t pmegp;
3375 	vaddr_t pgva, segva;
3376 	int pte, sme;
3377 #ifdef	HAVECACHE
3378 	int flush_by_page = 0;
3379 #endif
3380 
3381 	CHECK_SPL();
3382 
3383 #ifdef	DIAGNOSTIC
3384 	if (pmap != kernel_pmap) {
3385 		if (pmap->pm_ctxnum != get_context())
3386 			panic("pmap_remove_mmu: wrong context");
3387 	}
3388 #endif
3389 
3390 	segva = m68k_trunc_seg(sva);
3391 	sme = get_segmap(segva);
3392 
3393 #ifdef	DIAGNOSTIC
3394 	/* Make sure it is valid and known. */
3395 	if (sme == SEGINV)
3396 		panic("pmap_remove_mmu: SEGINV");
3397 	if (pmap->pm_segmap && (pmap->pm_segmap[VA_SEGNUM(segva)] != sme))
3398 		panic("pmap_remove_mmu: incorrect sme, va=0x%lx", segva);
3399 #endif
3400 
3401 	pmegp = pmeg_p(sme);
3402 	/* have pmeg, will travel */
3403 
3404 #ifdef	DIAGNOSTIC
3405 	/* Make sure we own the pmeg, right va, etc. */
3406 	if ((pmegp->pmeg_va != segva) ||
3407 		(pmegp->pmeg_owner != pmap) ||
3408 		(pmegp->pmeg_version != pmap->pm_version))
3409 	{
3410 		panic("pmap_remove_mmu: bad pmeg=%p", pmegp);
3411 	}
3412 	if (pmegp->pmeg_vpages < 0)
3413 		panic("pmap_remove_mmu: npages corrupted");
3414 	if (pmegp->pmeg_vpages == 0)
3415 		panic("pmap_remove_mmu: no valid pages?");
3416 #endif
3417 
3418 #ifdef	HAVECACHE
3419 	if (cache_size) {
3420 		/*
3421 		 * If the range to be removed is larger than the cache,
3422 		 * it will be cheaper to flush this segment entirely.
3423 		 */
3424 		if (cache_size < (eva - sva)) {
3425 			/* cheaper to flush whole segment */
3426 			cache_flush_segment(segva);
3427 		} else {
3428 			flush_by_page = 1;
3429 		}
3430 	}
3431 #endif
3432 
3433 	/* Invalidate the PTEs in the given range. */
3434 	for (pgva = sva; pgva < eva; pgva += NBPG) {
3435 		pte = get_pte(pgva);
3436 		if (pte & PG_VALID) {
3437 #ifdef	HAVECACHE
3438 			if (flush_by_page) {
3439 				cache_flush_page(pgva);
3440 				/* Get fresh mod/ref bits from write-back. */
3441 				pte = get_pte(pgva);
3442 			}
3443 #endif
3444 			if (IS_MAIN_MEM(pte)) {
3445 				save_modref_bits(pte);
3446 				pv_unlink(pmap, pte, pgva);
3447 			}
3448 #ifdef	PMAP_DEBUG
3449 			if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
3450 				printf("pmap: set_pte pmap=%p va=0x%lx"
3451 					   " old=0x%x new=0x%x (rrmmu)\n",
3452 					   pmap, pgva, pte, PG_INVAL);
3453 			}
3454 #endif
3455 			set_pte(pgva, PG_INVAL);
3456 			KASSERT(pmegp->pmeg_vpages > 0);
3457 			pmegp->pmeg_vpages--;
3458 		}
3459 	}
3460 
3461 	KASSERT(pmegp->pmeg_vpages >= 0);
3462 	if (pmegp->pmeg_vpages == 0) {
3463 		/* We are done with this pmeg. */
3464 		if (is_pmeg_wired(pmegp)) {
3465 #ifdef	PMAP_DEBUG
3466 			if (pmap_debug & PMD_WIRING) {
3467 				db_printf("pmap: removing wired pmeg: %p\n", pmegp);
3468 				Debugger();
3469 			}
3470 #endif	/* PMAP_DEBUG */
3471 		}
3472 
3473 #ifdef	PMAP_DEBUG
3474 		if (pmap_debug & PMD_SEGMAP) {
3475 			printf("pmap: set_segmap ctx=%d v=0x%lx old=0x%x new=ff (rm)\n",
3476 			    pmap->pm_ctxnum, segva, pmegp->pmeg_index);
3477 		}
3478 		pmeg_verify_empty(segva);
3479 #endif
3480 
3481 		/* Remove it from the MMU. */
3482 		if (kernel_pmap == pmap) {
3483 			/* Did cache flush above. */
3484 			set_segmap_allctx(segva, SEGINV);
3485 		} else {
3486 			/* Did cache flush above. */
3487 			set_segmap(segva, SEGINV);
3488 		}
3489 		pmap->pm_segmap[VA_SEGNUM(segva)] = SEGINV;
3490 		/* Now, put it on the free list. */
3491 		pmeg_free(pmegp);
3492 	}
3493 }
3494 
3495 /*
3496  * Remove some mappings, all in one PMEG,
3497  * where it is not currently in any context.
3498  */
3499 void
3500 pmap_remove_noctx(pmap, sva, eva)
3501 	pmap_t pmap;
3502 	vaddr_t sva, eva;
3503 {
3504 	pmeg_t pmegp;
3505 	int old_ctx, pte, sme, segnum;
3506 	vaddr_t pgva, segva;
3507 
3508 	CHECK_SPL();
3509 
3510 #ifdef	DIAGNOSTIC
3511 	/* Kernel always in a context (actually, in all contexts). */
3512 	if (pmap == kernel_pmap)
3513 		panic("pmap_remove_noctx: kernel_pmap");
3514 	if (pmap->pm_segmap == NULL)
3515 		panic("pmap_remove_noctx: null segmap");
3516 #endif
3517 
3518 	segva = m68k_trunc_seg(sva);
3519 	segnum = VA_SEGNUM(segva);
3520 	sme = pmap->pm_segmap[segnum];
3521 	if (sme == SEGINV)
3522 		return;
3523 	pmegp = pmeg_p(sme);
3524 
3525 	/*
3526 	 * Borrow the EMPTY_CONTEXT so we can access the PMEG
3527 	 * at its normal virtual address.
3528 	 */
3529 	old_ctx = get_context();
3530 	set_context(EMPTY_CONTEXT);
3531 	set_segmap(segva, sme);
3532 
3533 	/* Invalidate the PTEs in the given range. */
3534 	for (pgva = sva; pgva < eva; pgva += NBPG) {
3535 		pte = get_pte(pgva);
3536 		if (pte & PG_VALID) {
3537 			/* No cache flush needed. */
3538 			if (IS_MAIN_MEM(pte)) {
3539 				save_modref_bits(pte);
3540 				pv_unlink(pmap, pte, pgva);
3541 			}
3542 #ifdef	PMAP_DEBUG
3543 			if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
3544 				printf("pmap: set_pte pmap=%p va=0x%lx"
3545 					   " old=0x%x new=0x%x (rrncx)\n",
3546 					   pmap, pgva, pte, PG_INVAL);
3547 			}
3548 #endif
3549 			set_pte(pgva, PG_INVAL);
3550 			KASSERT(pmegp->pmeg_vpages > 0);
3551 			pmegp->pmeg_vpages--;
3552 		}
3553 	}
3554 
3555 	/*
3556 	 * Make the EMPTY_CONTEXT really empty again, and
3557 	 * restore the previous context.
3558 	 */
3559 	set_segmap(segva, SEGINV);
3560 	set_context(old_ctx);
3561 
3562 	KASSERT(pmegp->pmeg_vpages >= 0);
3563 	if (pmegp->pmeg_vpages == 0) {
3564 		/* We are done with this pmeg. */
3565 		if (is_pmeg_wired(pmegp)) {
3566 #ifdef	PMAP_DEBUG
3567 			if (pmap_debug & PMD_WIRING) {
3568 				db_printf("pmap: removing wired pmeg: %p\n", pmegp);
3569 				Debugger();
3570 			}
3571 #endif	/* PMAP_DEBUG */
3572 		}
3573 
3574 		pmap->pm_segmap[segnum] = SEGINV;
3575 		pmeg_free(pmegp);
3576 	}
3577 }
3578 
3579 
3580 /*
3581  * Count resident pages in this pmap.
3582  * See: kern_sysctl.c:pmap_resident_count
3583  */
3584 segsz_t
3585 pmap_resident_pages(pmap)
3586 	pmap_t pmap;
3587 {
3588 	int i, sme, pages;
3589 	pmeg_t pmeg;
3590 
3591 	if (pmap->pm_segmap == 0)
3592 		return (0);
3593 
3594 	pages = 0;
3595 	for (i = 0; i < NUSEG; i++) {
3596 		sme = pmap->pm_segmap[i];
3597 		if (sme != SEGINV) {
3598 			pmeg = pmeg_p(sme);
3599 			pages += pmeg->pmeg_vpages;
3600 		}
3601 	}
3602 	return (pages);
3603 }
3604 
3605 /*
3606  * Count wired pages in this pmap.
3607  * See vm_mmap.c:pmap_wired_count
3608  */
3609 segsz_t
3610 pmap_wired_pages(pmap)
3611 	pmap_t pmap;
3612 {
3613 	int i, mask, sme, pages;
3614 	pmeg_t pmeg;
3615 
3616 	if (pmap->pm_segmap == 0)
3617 		return (0);
3618 
3619 	pages = 0;
3620 	for (i = 0; i < NUSEG; i++) {
3621 		sme = pmap->pm_segmap[i];
3622 		if (sme != SEGINV) {
3623 			pmeg = pmeg_p(sme);
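			/* pmeg_wired holds one wired bit per page in the segment. */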
3624 			mask = 0x8000;
3625 			do {
3626 				if (pmeg->pmeg_wired & mask)
3627 					pages++;
3628 				mask = (mask >> 1);
3629 			} while (mask);
3630 		}
3631 	}
3632 	return (pages);
3633 }
3634 
3635 
3636 /*
3637  *	pmap_copy_page copies the specified (machine independent)
3638  *	page by mapping the page into virtual memory and using
3639  *	bcopy to copy the page, one machine dependent page at a
3640  *	time.
3641  */
3642 void
3643 pmap_copy_page(src, dst)
3644 	paddr_t	src, dst;
3645 {
3646 	int pte;
3647 	int s;
3648 
3649 	s = splvm();
3650 
3651 #ifdef	PMAP_DEBUG
3652 	if (pmap_debug & PMD_COW)
3653 		printf("pmap_copy_page: 0x%lx -> 0x%lx\n", src, dst);
3654 #endif
3655 
3656 #ifdef DIAGNOSTIC
3657 	if (tmp_vpages_inuse)
3658 		panic("pmap_copy_page: vpages inuse");
3659 	tmp_vpages_inuse++;
3660 #endif
3661 
3662 	/* PG_PERM is short for (PG_VALID|PG_WRITE|PG_SYSTEM|PG_NC) */
3663 	/* All mappings to tmp_vpages are non-cached, so no flush. */
3664 	pte = PG_PERM | PA_PGNUM(src);
3665 	set_pte(tmp_vpages[0], pte);
3666 	pte = PG_PERM | PA_PGNUM(dst);
3667 	set_pte(tmp_vpages[1], pte);
3668 	copypage((char *) tmp_vpages[0], (char *) tmp_vpages[1]);
3669 	set_pte(tmp_vpages[0], PG_INVAL);
3670 	set_pte(tmp_vpages[1], PG_INVAL);
3671 
3672 #ifdef DIAGNOSTIC
3673 	tmp_vpages_inuse--;
3674 #endif
3675 
3676 	splx(s);
3677 }
3678 
3679 /*
3680  *	pmap_zero_page zeros the specified (machine independent)
3681  *	page by mapping the page into virtual memory and using
3682  *	bzero to clear its contents, one machine dependent page
3683  *	at a time.
3684  */
3685 void
3686 pmap_zero_page(pa)
3687 	paddr_t	pa;
3688 {
3689 	int pte;
3690 	int s;
3691 
3692 	s = splvm();
3693 
3694 #ifdef	PMAP_DEBUG
3695 	if (pmap_debug & PMD_COW)
3696 		printf("pmap_zero_page: 0x%lx\n", pa);
3697 #endif
3698 
3699 #ifdef DIAGNOSTIC
3700 	if (tmp_vpages_inuse)
3701 		panic("pmap_zero_page: vpages inuse");
3702 	tmp_vpages_inuse++;
3703 #endif
3704 
3705 	/* PG_PERM is short for (PG_VALID|PG_WRITE|PG_SYSTEM|PG_NC) */
3706 	/* All mappings to tmp_vpages are non-cached, so no flush. */
3707 	pte = PG_PERM | PA_PGNUM(pa);
3708 	set_pte(tmp_vpages[0], pte);
3709 	zeropage((char *) tmp_vpages[0]);
3710 	set_pte(tmp_vpages[0], PG_INVAL);
3711 
3712 #ifdef DIAGNOSTIC
3713 	tmp_vpages_inuse--;
3714 #endif
3715 
3716 	splx(s);
3717 }
3718 
3719 /*
3720  *	Routine:	pmap_collect
3721  *	Function:
3722  *		Garbage collects the physical map system for
3723  *		pages which are no longer used.
3724  *		Success need not be guaranteed -- that is, there
3725  *		may well be pages which are not referenced, but
3726  *		others may be collected.
3727  *	Usage:
3728  *		Called by the pageout daemon when pages are scarce.
3729  */
3730 void
3731 pmap_collect(pmap)
3732 	pmap_t pmap;
3733 {
3734 }
3735 
3736 /*
3737  * Find first virtual address >= *va that is
3738  * least likely to cause cache aliases.
3739  * (This will just seg-align mappings.)
3740  */
3741 void
3742 pmap_prefer(fo, va)
3743 	vaddr_t fo;
3744 	vaddr_t *va;
3745 {
3746 	long d;
3747 
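	/* Advance *va so it shares fo's offset within a 128K segment. */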
3748 	d = fo - *va;
3749 	d &= SEGOFSET;
3750 	*va += d;
3751 }
3752 
3753 /*
3754  * Fill in the sun3-specific part of the kernel core header
3755  * for dumpsys().  (See machdep.c for the rest.)
3756  */
3757 void
3758 pmap_kcore_hdr(sh)
3759 	struct sun3_kcore_hdr *sh;
3760 {
3761 	vaddr_t va;
3762 	u_char *cp, *ep;
3763 
3764 	sh->segshift = SEGSHIFT;
3765 	sh->pg_frame = PG_FRAME;
3766 	sh->pg_valid = PG_VALID;
3767 
3768 	/* Copy the kernel segmap (256 bytes). */
3769 	va = KERNBASE;
3770 	cp = sh->ksegmap;
3771 	ep = cp + sizeof(sh->ksegmap);
3772 	do {
3773 		*cp = get_segmap(va);
3774 		va += NBSG;
3775 		cp++;
3776 	} while (cp < ep);
3777 }
3778 
3779 /*
3780  * Copy the pagemap RAM into the passed buffer (one page)
3781  * starting at OFF in the pagemap RAM.
3782  */
3783 void
3784 pmap_get_pagemap(pt, off)
3785 	int *pt;
3786 	int off;
3787 {
3788 	vaddr_t va, va_end;
3789 	int sme, sme_end;	/* SegMap Entry numbers */
3790 
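	/*
	 * Each PMEG holds 16 four-byte PTEs, i.e. 64 bytes of pagemap
	 * RAM, so "off >> 6" selects the starting PMEG and one page-sized
	 * buffer covers 128 PMEGs.
	 */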
3791 	sme = (off >> 6);	/* PMEG to start on */
3792 	sme_end = sme + 128; /* where to stop */
3793 	va_end = temp_seg_va + NBSG;
3794 
3795 	do {
3796 		set_segmap(temp_seg_va, sme);
3797 		va = temp_seg_va;
3798 		do {
3799 			*pt++ = get_pte(va);
3800 			va += NBPG;
3801 		} while (va < va_end);
3802 		sme++;
3803 	} while (sme < sme_end);
3804 	set_segmap(temp_seg_va, SEGINV);
3805 }
3806 
3807 
3808 /*
3809  * Helper functions for changing unloaded PMEGs
3810  * XXX: These should go away.  (Borrow context zero instead.)
3811  */
3812 
3813 #ifdef DIAGNOSTIC
3814 static int temp_seg_inuse;
3815 #endif
3816 
3817 static int
3818 get_pte_pmeg(int pmeg_num, int page_num)
3819 {
3820 	vaddr_t va;
3821 	int pte;
3822 
3823 	CHECK_SPL();
3824 #ifdef DIAGNOSTIC
3825 	if (temp_seg_inuse)
3826 		panic("get_pte_pmeg: temp_seg_inuse");
3827 	temp_seg_inuse++;
3828 #endif
3829 
3830 	va = temp_seg_va;
3831 	set_segmap(temp_seg_va, pmeg_num);
3832 	va += NBPG*page_num;
3833 	pte = get_pte(va);
3834 	set_segmap(temp_seg_va, SEGINV);
3835 
3836 #ifdef DIAGNOSTIC
3837 	temp_seg_inuse--;
3838 #endif
3839 	return pte;
3840 }
3841 
3842 static void
3843 set_pte_pmeg(int pmeg_num, int page_num, int pte)
3844 {
3845 	vaddr_t va;
3846 
3847 	CHECK_SPL();
3848 #ifdef DIAGNOSTIC
3849 	if (temp_seg_inuse)
3850 		panic("set_pte_pmeg: temp_seg_inuse");
3851 	temp_seg_inuse++;
3852 #endif
3853 
3854 	/* We never access data in temp_seg_va so no need to flush. */
3855 	va = temp_seg_va;
3856 	set_segmap(temp_seg_va, pmeg_num);
3857 	va += NBPG*page_num;
3858 	set_pte(va, pte);
3859 	set_segmap(temp_seg_va, SEGINV);
3860 
3861 #ifdef DIAGNOSTIC
3862 	temp_seg_inuse--;
3863 #endif
3864 }
3865 
3866 /*
3867  *	Routine:        pmap_procwr
3868  *
3869  *	Function:
3870  *		Synchronize caches corresponding to [addr, addr+len) in p.
3871  */
3872 void
3873 pmap_procwr(p, va, len)
3874 	struct proc	*p;
3875 	vaddr_t		va;
3876 	size_t		len;
3877 {
3878 	(void)cachectl1(0x80000004, va, len, p);
3879 }
3880 
3881 
3882 #ifdef	PMAP_DEBUG
3883 /* Things to call from the debugger. */
3884 
3885 void
3886 pmap_print(pmap)
3887 	pmap_t pmap;
3888 {
3889 	db_printf(" pm_ctxnum=%d\n", pmap->pm_ctxnum);
3890 	db_printf(" pm_version=0x%x\n", pmap->pm_version);
3891 	db_printf(" pm_segmap=%p\n", pmap->pm_segmap);
3892 }
3893 
3894 void
3895 pmeg_print(pmegp)
3896 	pmeg_t pmegp;
3897 {
3898 	db_printf("link_next=%p  link_prev=%p\n",
3899 	    pmegp->pmeg_link.tqe_next,
3900 	    pmegp->pmeg_link.tqe_prev);
3901 	db_printf("index=0x%x owner=%p own_vers=0x%x\n",
3902 	    pmegp->pmeg_index, pmegp->pmeg_owner, pmegp->pmeg_version);
3903 	db_printf("va=0x%lx wired=0x%x reserved=0x%x vpgs=0x%x qstate=0x%x\n",
3904 	    pmegp->pmeg_va, pmegp->pmeg_wired,
3905 	    pmegp->pmeg_reserved, pmegp->pmeg_vpages,
3906 	    pmegp->pmeg_qstate);
3907 }
3908 
3909 void
3910 pv_print(pa)
3911 	paddr_t pa;
3912 {
3913 	pv_entry_t pv;
3914 	int idx;
3915 
3916 	idx = PA_PGNUM(pa);
3917 	if (idx >= physmem) {
3918 		db_printf("bad address\n");
3919 		return;
3920 	}
3921 	db_printf("pa=0x%lx, flags=0x%x\n",
3922 			  pa, pv_flags_tbl[idx]);
3923 
3924 	pv = pv_head_tbl[idx];
3925 	while (pv) {
3926 		db_printf(" pv_entry %p pmap %p va 0x%lx next %p\n",
3927 			   pv, pv->pv_pmap, pv->pv_va, pv->pv_next);
3928 		pv = pv->pv_next;
3929 	}
3930 }
3931 #endif	/* PMAP_DEBUG */
3932 
3933 /*
3934  * Local Variables:
3935  * tab-width: 4
3936  * End:
3937  */
3938