/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and Ralph Campbell.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)pmap.c	8.5 (Berkeley) 06/02/95
 */

/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidation or reduced-protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/user.h>
#include <sys/buf.h>
#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/machConst.h>
#include <machine/pte.h>

extern vm_page_t vm_page_alloc1 __P((void));
extern void vm_page_free1 __P((vm_page_t));

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 * XXX really should do this as a part of the higher level code.
 */
typedef struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
} *pv_entry_t;

pv_entry_t	pv_table;	/* array of entries, one per page */
extern void	pmap_remove_pv();

#define pa_index(pa)		atop((pa) - first_phys_addr)
#define pa_to_pvh(pa)		(&pv_table[pa_index(pa)])
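/*
 * Example: with first_phys_addr at, say, 0x01000000 and 4K pages,
 * pa_index(0x01003000) is 3 and pa_to_pvh(0x01003000) is &pv_table[3],
 * i.e. there is one pv list head per managed physical page.
 */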

#ifdef DEBUG
struct {
	int kernel;	/* entering kernel mapping */
	int user;	/* entering user mapping */
	int ptpneeded;	/* needed to allocate a PT page */
	int pwchange;	/* no mapping change, just wiring or protection */
	int wchange;	/* no mapping change, just wiring */
	int mchange;	/* was mapped but mapping to different page */
	int managed;	/* a managed page */
	int firstpv;	/* first mapping for this PA */
	int secondpv;	/* second mapping for this PA */
	int ci;		/* cache inhibited */
	int unmanaged;	/* not a managed page */
	int flushes;	/* cache flushes */
	int cachehit;	/* new entry forced valid entry out */
} enter_stats;
struct {
	int calls;
	int removes;
	int flushes;
	int pidflushes;	/* HW pid stolen */
	int pvfirst;
	int pvsearch;
} remove_stats;

int pmapdebug;
#define PDB_FOLLOW	0x0001
#define PDB_INIT	0x0002
#define PDB_ENTER	0x0004
#define PDB_REMOVE	0x0008
#define PDB_CREATE	0x0010
#define PDB_PTPAGE	0x0020
#define PDB_PVENTRY	0x0040
#define PDB_BITS	0x0080
#define PDB_COLLECT	0x0100
#define PDB_PROTECT	0x0200
#define PDB_TLBPID	0x0400
#define PDB_PARANOIA	0x2000
#define PDB_WIRING	0x4000
#define PDB_PVDUMP	0x8000

#endif /* DEBUG */

struct pmap	kernel_pmap_store;

vm_offset_t	avail_start;	/* PA of first available physical page */
vm_offset_t	avail_end;	/* PA of last available physical page */
vm_size_t	mem_size;	/* memory size in bytes */
vm_offset_t	virtual_avail;  /* VA of first avail page (after kernel bss)*/
vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
int		pmaxpagesperpage;	/* PAGE_SIZE / NBPG */
#ifdef ATTR
char		*pmap_attributes;	/* reference and modify bits */
#endif
struct segtab	*free_segtab;		/* free list kept locally */
u_int		tlbpid_gen = 1;		/* TLB PID generation count */
int		tlbpid_cnt = 2;		/* next available TLB PID */
pt_entry_t	*Sysmap;		/* kernel pte table */
u_int		Sysmapsize;		/* number of pte's in Sysmap */
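/*
 * Sysmap is a linear array of PTEs covering the kernel virtual range
 * starting at VM_MIN_KERNEL_ADDRESS; kvtopte() (from pte.h) presumably
 * just indexes it by the page number of (va - VM_MIN_KERNEL_ADDRESS).
 */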

/*
 *	Bootstrap the system enough to run with virtual memory.
 *	firstaddr is the first unused kseg0 address (not page aligned).
 */
void
pmap_bootstrap(firstaddr)
	vm_offset_t firstaddr;
{
	register int i;
	vm_offset_t start = firstaddr;
	extern int maxmem, physmem;

#define	valloc(name, type, num) \
	    (name) = (type *)firstaddr; firstaddr = (vm_offset_t)((name)+(num))
	/*
	 * Allocate a PTE table for the kernel.
	 * The '1024' comes from PAGER_MAP_SIZE in vm_pager_init().
	 * This should be kept in sync.
	 * We also reserve space for kmem_alloc_pageable() for vm_fork().
	 */
	Sysmapsize = (VM_KMEM_SIZE + VM_MBUF_SIZE + VM_PHYS_SIZE +
		nbuf * MAXBSIZE + 16 * NCARGS) / NBPG + 1024 + 256;
#ifdef SYSVSHM
	Sysmapsize += shminfo.shmall;
#endif
	valloc(Sysmap, pt_entry_t, Sysmapsize);
#ifdef ATTR
	valloc(pmap_attributes, char, physmem);
#endif
	/*
	 * Allocate memory for pv_table.
	 * This will allocate more entries than we really need.
	 * We could do this in pmap_init when we know the actual
	 * phys_start and phys_end, but it's better to use kseg0 addresses
	 * rather than kernel virtual addresses mapped through the TLB.
	 */
	i = maxmem - pmax_btop(MACH_CACHED_TO_PHYS(firstaddr));
	valloc(pv_table, struct pv_entry, i);

	/*
	 * Clear allocated memory.
	 */
	firstaddr = pmax_round_page(firstaddr);
	bzero((caddr_t)start, firstaddr - start);

	avail_start = MACH_CACHED_TO_PHYS(firstaddr);
	avail_end = pmax_ptob(maxmem);
	mem_size = avail_end - avail_start;

	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MIN_KERNEL_ADDRESS + Sysmapsize * NBPG;
	/* XXX need to decide how to set cnt.v_page_size */
	pmaxpagesperpage = 1;

	simple_lock_init(&kernel_pmap_store.pm_lock);
	kernel_pmap_store.pm_count = 1;
}

/*
 * Bootstrap memory allocator. This function allows for early dynamic
 * memory allocation until the virtual memory system has been bootstrapped.
 * After that point, either kmem_alloc or malloc should be used. This
 * function works by stealing pages from the (to be) managed page pool,
 * stealing virtual address space, then mapping the pages and zeroing them.
 *
 * It should be used from pmap_bootstrap until vm_page_startup; after that
 * it cannot be used and will panic if tried. Note that this
 * memory will never be freed, and in essence it is wired down.
 */
void *
pmap_bootstrap_alloc(size)
	int size;
{
	vm_offset_t val;
	extern boolean_t vm_page_startup_initialized;

	if (vm_page_startup_initialized)
		panic("pmap_bootstrap_alloc: called after startup initialized");

	val = MACH_PHYS_TO_CACHED(avail_start);
	size = round_page(size);
	avail_start += size;

	blkclr((caddr_t)val, size);
	return ((void *)val);
}

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(phys_start, phys_end)
	vm_offset_t phys_start, phys_end;
{

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_INIT))
		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
#endif
}

/*
 *	Create and return a physical map.
 *
 *	If the size specified for the map
 *	is zero, the map is an actual physical
 *	map, and may be referenced by the
 *	hardware.
 *
 *	If the size specified is non-zero,
 *	the map will be used in software only, and
 *	is bounded by that size.
 */
pmap_t
pmap_create(size)
	vm_size_t size;
{
	register pmap_t pmap;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_create(%x)\n", size);
#endif
	/*
	 * Software use map does not need a pmap
	 */
	if (size)
		return (NULL);

	/* XXX: is it ok to wait here? */
	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
#ifdef notifwewait
	if (pmap == NULL)
		panic("pmap_create: cannot allocate a pmap");
#endif
	bzero(pmap, sizeof(*pmap));
	pmap_pinit(pmap);
	return (pmap);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
void
pmap_pinit(pmap)
	register struct pmap *pmap;
{
	register int i;
	int s;
	extern struct vmspace vmspace0;
	extern struct user *proc0paddr;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_pinit(%x)\n", pmap);
#endif
	simple_lock_init(&pmap->pm_lock);
	pmap->pm_count = 1;
	if (free_segtab) {
		s = splimp();
		pmap->pm_segtab = free_segtab;
		free_segtab = *(struct segtab **)free_segtab;
		pmap->pm_segtab->seg_tab[0] = NULL;
		splx(s);
	} else {
		register struct segtab *stp;
		vm_page_t mem;

		mem = vm_page_alloc1();
		pmap_zero_page(VM_PAGE_TO_PHYS(mem));
		pmap->pm_segtab = stp = (struct segtab *)
			MACH_PHYS_TO_CACHED(VM_PAGE_TO_PHYS(mem));
		i = pmaxpagesperpage * (NBPG / sizeof(struct segtab));
		s = splimp();
		while (--i != 0) {
			stp++;
			*(struct segtab **)stp = free_segtab;
			free_segtab = stp;
		}
		splx(s);
	}
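	/*
	 * When a new page had to be allocated above, its first struct
	 * segtab becomes pm_segtab and the remaining segtabs in the page
	 * are threaded onto free_segtab (the next pointer lives in the
	 * first word of each free entry), so later pmap_pinit() calls can
	 * take the cheap free-list path.
	 */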
#ifdef DIAGNOSTIC
	for (i = 0; i < PMAP_SEGTABSIZE; i++)
		if (pmap->pm_segtab->seg_tab[i] != 0)
			panic("pmap_pinit: pm_segtab != 0");
#endif
	if (pmap == &vmspace0.vm_pmap) {
		/*
		 * The initial process has already been allocated a TLBPID
		 * in mach_init().
		 */
		pmap->pm_tlbpid = 1;
		pmap->pm_tlbgen = tlbpid_gen;
		proc0paddr->u_pcb.pcb_segtab = (void *)pmap->pm_segtab;
	} else {
		pmap->pm_tlbpid = 0;
		pmap->pm_tlbgen = 0;
	}
}

/*
 *	Retire the given physical map from service.
 *	Should only be called if the map contains
 *	no valid mappings.
 */
void
pmap_destroy(pmap)
	register pmap_t pmap;
{
	int count;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_destroy(%x)\n", pmap);
#endif
	if (pmap == NULL)
		return;

	simple_lock(&pmap->pm_lock);
	count = --pmap->pm_count;
	simple_unlock(&pmap->pm_lock);
	if (count == 0) {
		pmap_release(pmap);
		free((caddr_t)pmap, M_VMPMAP);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap)
	register pmap_t pmap;
{

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_release(%x)\n", pmap);
#endif

	if (pmap->pm_segtab) {
		register pt_entry_t *pte;
		register int i;
		int s;
#ifdef DIAGNOSTIC
		register int j;
#endif

		for (i = 0; i < PMAP_SEGTABSIZE; i++) {
			/* get pointer to segment map */
			pte = pmap->pm_segtab->seg_tab[i];
			if (!pte)
				continue;
			vm_page_free1(
				PHYS_TO_VM_PAGE(MACH_CACHED_TO_PHYS(pte)));
#ifdef DIAGNOSTIC
			for (j = 0; j < NPTEPG; j++) {
				if (pte->pt_entry)
					panic("pmap_release: segmap not empty");
			}
#endif
			pmap->pm_segtab->seg_tab[i] = NULL;
		}
		s = splimp();
		*(struct segtab **)pmap->pm_segtab = free_segtab;
		free_segtab = pmap->pm_segtab;
		splx(s);
		pmap->pm_segtab = NULL;
	}
}

/*
 *	Add a reference to the specified pmap.
 */
void
pmap_reference(pmap)
	pmap_t pmap;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_reference(%x)\n", pmap);
#endif
	if (pmap != NULL) {
		simple_lock(&pmap->pm_lock);
		pmap->pm_count++;
		simple_unlock(&pmap->pm_lock);
	}
}

/*
 *	Remove the given range of addresses from the specified map.
 *
 *	It is assumed that the start and end are properly
 *	rounded to the page size.
 */
void
pmap_remove(pmap, sva, eva)
	register pmap_t pmap;
	vm_offset_t sva, eva;
{
	register vm_offset_t nssva;
	register pt_entry_t *pte;
	unsigned entry;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
		printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
	remove_stats.calls++;
#endif
	if (pmap == NULL)
		return;

	if (!pmap->pm_segtab) {
		register pt_entry_t *pte;

		/* remove entries from kernel pmap */
#ifdef DIAGNOSTIC
		if (sva < VM_MIN_KERNEL_ADDRESS || eva > virtual_end)
			panic("pmap_remove: kva not in range");
#endif
		pte = kvtopte(sva);
		for (; sva < eva; sva += NBPG, pte++) {
			entry = pte->pt_entry;
			if (!(entry & PG_V))
				continue;
			if (entry & PG_WIRED)
				pmap->pm_stats.wired_count--;
			pmap->pm_stats.resident_count--;
			pmap_remove_pv(pmap, sva, entry & PG_FRAME);
#ifdef ATTR
			pmap_attributes[atop(entry & PG_FRAME)] = 0;
#endif
			pte->pt_entry = PG_NV;
			/*
			 * Flush the TLB for the given address.
			 */
			MachTLBFlushAddr(sva);
#ifdef DEBUG
			remove_stats.flushes++;
#endif
		}
		return;
	}

#ifdef DIAGNOSTIC
	if (eva > VM_MAXUSER_ADDRESS)
		panic("pmap_remove: uva not in range");
#endif
	while (sva < eva) {
		nssva = pmax_trunc_seg(sva) + NBSEG;
		if (nssva == 0 || nssva > eva)
			nssva = eva;
		/*
		 * If VA belongs to an unallocated segment,
		 * skip to the next segment boundary.
		 */
		if (!(pte = pmap_segmap(pmap, sva))) {
			sva = nssva;
			continue;
		}
		/*
		 * Invalidate every valid mapping within this segment.
		 */
		pte += (sva >> PGSHIFT) & (NPTEPG - 1);
		for (; sva < nssva; sva += NBPG, pte++) {
			entry = pte->pt_entry;
			if (!(entry & PG_V))
				continue;
			if (entry & PG_WIRED)
				pmap->pm_stats.wired_count--;
			pmap->pm_stats.resident_count--;
			pmap_remove_pv(pmap, sva, entry & PG_FRAME);
#ifdef ATTR
			pmap_attributes[atop(entry & PG_FRAME)] = 0;
#endif
			pte->pt_entry = PG_NV;
			/*
			 * Flush the TLB for the given address.
			 */
			if (pmap->pm_tlbgen == tlbpid_gen) {
				MachTLBFlushAddr(sva | (pmap->pm_tlbpid <<
					VMMACH_TLB_PID_SHIFT));
#ifdef DEBUG
				remove_stats.flushes++;
#endif
			}
		}
	}
}

/*
 *	pmap_page_protect:
 *
 *	Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(pa, prot)
	vm_offset_t pa;
	vm_prot_t prot;
{
	register pv_entry_t pv;
	register vm_offset_t va;
	int s;

#ifdef DEBUG
	if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
	    prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))
		printf("pmap_page_protect(%x, %x)\n", pa, prot);
#endif
	if (!IS_VM_PHYSADDR(pa))
		return;

	switch (prot) {
	case VM_PROT_READ|VM_PROT_WRITE:
	case VM_PROT_ALL:
		break;

	/* copy_on_write */
	case VM_PROT_READ:
	case VM_PROT_READ|VM_PROT_EXECUTE:
		pv = pa_to_pvh(pa);
		s = splimp();
		/*
		 * Loop over all current mappings, setting/clearing as appropriate.
		 */
		if (pv->pv_pmap != NULL) {
			for (; pv; pv = pv->pv_next) {
				extern vm_offset_t pager_sva, pager_eva;

				va = pv->pv_va;

				/*
				 * XXX don't write protect pager mappings
				 */
				if (va >= pager_sva && va < pager_eva)
					continue;
				pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
					prot);
			}
		}
		splx(s);
		break;

	/* remove_all */
	default:
		pv = pa_to_pvh(pa);
		s = splimp();
		while (pv->pv_pmap != NULL) {
			pmap_remove(pv->pv_pmap, pv->pv_va,
				    pv->pv_va + PAGE_SIZE);
		}
		splx(s);
	}
}

/*
 *	Set the physical protection on the
 *	specified range of this map as requested.
 */
void
pmap_protect(pmap, sva, eva, prot)
	register pmap_t pmap;
	vm_offset_t sva, eva;
	vm_prot_t prot;
{
	register vm_offset_t nssva;
	register pt_entry_t *pte;
	register unsigned entry;
	u_int p;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
		printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
#endif
	if (pmap == NULL)
		return;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}

	p = (prot & VM_PROT_WRITE) ? PG_RW : PG_RO;
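	/*
	 * As the comment in the kernel-pmap case below explains, the new
	 * entry is written with PG_M clear even when write access is
	 * granted, so the next store to the page traps and the dirty state
	 * can be recorded; PG_RO marks pages that must stay read-only.
	 */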

	if (!pmap->pm_segtab) {
		/*
		 * Change entries in kernel pmap.
		 * This will trap if the page is writeable (in order to set
		 * the dirty bit) even if the dirty bit is already set. The
		 * optimization isn't worth the effort since this code isn't
		 * executed much. The common case is to make a user page
		 * read-only.
		 */
#ifdef DIAGNOSTIC
		if (sva < VM_MIN_KERNEL_ADDRESS || eva > virtual_end)
			panic("pmap_protect: kva not in range");
#endif
		pte = kvtopte(sva);
		for (; sva < eva; sva += NBPG, pte++) {
			entry = pte->pt_entry;
			if (!(entry & PG_V))
				continue;
			entry = (entry & ~(PG_M | PG_RO)) | p;
			pte->pt_entry = entry;
			/*
			 * Update the TLB if the given address is in the cache.
			 */
			MachTLBUpdate(sva, entry);
		}
		return;
	}

#ifdef DIAGNOSTIC
	if (eva > VM_MAXUSER_ADDRESS)
		panic("pmap_protect: uva not in range");
#endif
	while (sva < eva) {
		nssva = pmax_trunc_seg(sva) + NBSEG;
		if (nssva == 0 || nssva > eva)
			nssva = eva;
		/*
		 * If VA belongs to an unallocated segment,
		 * skip to the next segment boundary.
		 */
		if (!(pte = pmap_segmap(pmap, sva))) {
			sva = nssva;
			continue;
		}
		/*
		 * Change protection on every valid mapping within this segment.
		 */
		pte += (sva >> PGSHIFT) & (NPTEPG - 1);
		for (; sva < nssva; sva += NBPG, pte++) {
			entry = pte->pt_entry;
			if (!(entry & PG_V))
				continue;
			entry = (entry & ~(PG_M | PG_RO)) | p;
			pte->pt_entry = entry;
			/*
			 * Update the TLB if the given address is in the cache.
			 */
			if (pmap->pm_tlbgen == tlbpid_gen)
				MachTLBUpdate(sva | (pmap->pm_tlbpid <<
					VMMACH_TLB_PID_SHIFT), entry);
		}
	}
}

/*
 *	Insert the given physical page (p) at
 *	the specified virtual address (v) in the
 *	target physical map with the protection requested.
 *
 *	If specified, the page will be wired down, meaning
 *	that the related pte can not be reclaimed.
 *
 *	NB:  This is the only routine which MAY NOT lazy-evaluate
 *	or lose information.  That is, this routine must actually
 *	insert this page into the given map NOW.
 */
void
pmap_enter(pmap, va, pa, prot, wired)
	register pmap_t pmap;
	vm_offset_t va;
	register vm_offset_t pa;
	vm_prot_t prot;
	boolean_t wired;
{
	register pt_entry_t *pte;
	register u_int npte;
	register int i, j;
	vm_page_t mem;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
		printf("pmap_enter(%x, %x, %x, %x, %x)\n",
		       pmap, va, pa, prot, wired);
#endif
#ifdef DIAGNOSTIC
	if (!pmap)
		panic("pmap_enter: pmap");
	if (!pmap->pm_segtab) {
		enter_stats.kernel++;
		if (va < VM_MIN_KERNEL_ADDRESS || va >= virtual_end)
			panic("pmap_enter: kva");
	} else {
		enter_stats.user++;
		if (va >= VM_MAXUSER_ADDRESS)
			panic("pmap_enter: uva");
	}
	if (pa & 0x80000000)
		panic("pmap_enter: pa");
	if (!(prot & VM_PROT_READ))
		panic("pmap_enter: prot");
#endif

	if (IS_VM_PHYSADDR(pa)) {
		register pv_entry_t pv, npv;
		int s;

		if (!(prot & VM_PROT_WRITE))
			npte = PG_RO;
		else {
			register vm_page_t mem;

			mem = PHYS_TO_VM_PAGE(pa);
			if ((int)va < 0) {
				/*
				 * Don't bother to trap on kernel writes,
				 * just record page as dirty.
				 */
				npte = PG_M;
				mem->flags &= ~PG_CLEAN;
			} else
#ifdef ATTR
				if ((pmap_attributes[atop(pa)] &
				    PMAP_ATTR_MOD) || !(mem->flags & PG_CLEAN))
#else
				if (!(mem->flags & PG_CLEAN))
#endif
					npte = PG_M;
			else
				npte = 0;
		}

#ifdef DEBUG
		enter_stats.managed++;
#endif
		/*
		 * Enter the pmap and virtual address into the
		 * physical to virtual map table.
		 */
		pv = pa_to_pvh(pa);
		s = splimp();
#ifdef DEBUG
		if (pmapdebug & PDB_ENTER)
			printf("pmap_enter: pv %x: was %x/%x/%x\n",
			       pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
#endif
		if (pv->pv_pmap == NULL) {
			/*
			 * No entries yet, use header as the first entry
			 */
#ifdef DEBUG
			if (pmapdebug & PDB_PVENTRY)
				printf("pmap_enter: first pv: pmap %x va %x\n",
					pmap, va);
			enter_stats.firstpv++;
#endif
			pv->pv_va = va;
			pv->pv_pmap = pmap;
			pv->pv_next = NULL;
		} else {
			/*
			 * There is at least one other VA mapping this page.
			 * Place this entry after the header.
			 *
			 * Note: the entry may already be in the table if
			 * we are only changing the protection bits.
			 */
			for (npv = pv; npv; npv = npv->pv_next)
				if (pmap == npv->pv_pmap && va == npv->pv_va) {
#ifdef DIAGNOSTIC
					unsigned entry;

					if (!pmap->pm_segtab)
						entry = kvtopte(va)->pt_entry;
					else {
						pte = pmap_segmap(pmap, va);
						if (pte) {
							pte += (va >> PGSHIFT) &
							    (NPTEPG - 1);
							entry = pte->pt_entry;
						} else
							entry = 0;
					}
					if (!(entry & PG_V) ||
					    (entry & PG_FRAME) != pa)
						printf(
			"pmap_enter: found va %x pa %x in pv_table but != %x\n",
							va, pa, entry);
#endif
					goto fnd;
				}
#ifdef DEBUG
			if (pmapdebug & PDB_PVENTRY)
				printf("pmap_enter: new pv: pmap %x va %x\n",
					pmap, va);
#endif
			/* can this cause us to recurse forever? */
			npv = (pv_entry_t)
				malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
			npv->pv_va = va;
			npv->pv_pmap = pmap;
			npv->pv_next = pv->pv_next;
			pv->pv_next = npv;
#ifdef DEBUG
			if (!npv->pv_next)
				enter_stats.secondpv++;
#endif
		fnd:
			;
		}
		splx(s);
	} else {
		/*
		 * Assumption: if it is not part of our managed memory
		 * then it must be device memory, which may be volatile.
		 */
#ifdef DEBUG
		enter_stats.unmanaged++;
#endif
		npte = (prot & VM_PROT_WRITE) ? (PG_M | PG_N) : (PG_RO | PG_N);
	}

	/*
	 * The only time we need to flush the cache is if we
	 * execute from a physical address and then change the data.
	 * This is the best place to do this.
	 * pmap_protect() and pmap_remove() are mostly used to switch
	 * between R/W and R/O pages.
	 * NOTE: we only support cache flush for read only text.
	 */
	if (prot == (VM_PROT_READ | VM_PROT_EXECUTE))
		MachFlushICache(MACH_PHYS_TO_CACHED(pa), PAGE_SIZE);

	if (!pmap->pm_segtab) {
		/* enter entries into kernel pmap */
		pte = kvtopte(va);
		npte |= pa | PG_V | PG_G;
		if (wired) {
			pmap->pm_stats.wired_count += pmaxpagesperpage;
			npte |= PG_WIRED;
		}
		i = pmaxpagesperpage;
		do {
			if (!(pte->pt_entry & PG_V)) {
				pmap->pm_stats.resident_count++;
			} else {
#ifdef DIAGNOSTIC
				if (pte->pt_entry & PG_WIRED)
					panic("pmap_enter: kernel wired");
#endif
			}
			/*
			 * Update the same virtual address entry.
			 */
			MachTLBUpdate(va, npte);
			pte->pt_entry = npte;
			va += NBPG;
			npte += NBPG;
			pte++;
		} while (--i != 0);
		return;
	}

	if (!(pte = pmap_segmap(pmap, va))) {
		mem = vm_page_alloc1();
		pmap_zero_page(VM_PAGE_TO_PHYS(mem));
		pmap_segmap(pmap, va) = pte = (pt_entry_t *)
			MACH_PHYS_TO_CACHED(VM_PAGE_TO_PHYS(mem));
	}
	pte += (va >> PGSHIFT) & (NPTEPG - 1);
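	/*
	 * This is the two-level lookup used for user addresses throughout
	 * this file: pmap_segmap() selects the page of PTEs for the
	 * segment containing va (one is allocated and zeroed on first use,
	 * just above), and the low-order page-number bits of va pick the
	 * PTE within that page.
	 */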

	/*
	 * Now validate mapping with desired protection/wiring.
	 * Assume uniform modified and referenced status for all
	 * PMAX pages in a MACH page.
	 */
	npte |= pa | PG_V;
	if (wired) {
		pmap->pm_stats.wired_count += pmaxpagesperpage;
		npte |= PG_WIRED;
	}
#ifdef DEBUG
	if (pmapdebug & PDB_ENTER) {
		printf("pmap_enter: new pte %x", npte);
		if (pmap->pm_tlbgen == tlbpid_gen)
			printf(" tlbpid %d", pmap->pm_tlbpid);
		printf("\n");
	}
#endif
	i = pmaxpagesperpage;
	do {
		pte->pt_entry = npte;
		if (pmap->pm_tlbgen == tlbpid_gen)
			MachTLBUpdate(va | (pmap->pm_tlbpid <<
				VMMACH_TLB_PID_SHIFT), npte);
		va += NBPG;
		npte += NBPG;
		pte++;
	} while (--i != 0);
}

/*
 *	Routine:	pmap_change_wiring
 *	Function:	Change the wiring attribute for a map/virtual-address
 *			pair.
 *	In/out conditions:
 *			The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap, va, wired)
	register pmap_t	pmap;
	vm_offset_t va;
	boolean_t wired;
{
	register pt_entry_t *pte;
	u_int p;
	register int i;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_WIRING))
		printf("pmap_change_wiring(%x, %x, %x)\n", pmap, va, wired);
#endif
	if (pmap == NULL)
		return;

	p = wired ? PG_WIRED : 0;

	/*
	 * Don't need to flush the TLB since PG_WIRED is only in software.
	 */
	if (!pmap->pm_segtab) {
		/* change entries in kernel pmap */
#ifdef DIAGNOSTIC
		if (va < VM_MIN_KERNEL_ADDRESS || va >= virtual_end)
			panic("pmap_change_wiring");
#endif
		pte = kvtopte(va);
	} else {
		if (!(pte = pmap_segmap(pmap, va)))
			return;
		pte += (va >> PGSHIFT) & (NPTEPG - 1);
	}

	i = pmaxpagesperpage;
	if (!(pte->pt_entry & PG_WIRED) && p)
		pmap->pm_stats.wired_count += i;
	else if ((pte->pt_entry & PG_WIRED) && !p)
		pmap->pm_stats.wired_count -= i;
	do {
		if (pte->pt_entry & PG_V)
			pte->pt_entry = (pte->pt_entry & ~PG_WIRED) | p;
		pte++;
	} while (--i != 0);
}

/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */
vm_offset_t
pmap_extract(pmap, va)
	register pmap_t	pmap;
	vm_offset_t va;
{
	register vm_offset_t pa;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_extract(%x, %x) -> ", pmap, va);
#endif

	if (!pmap->pm_segtab) {
#ifdef DIAGNOSTIC
		if (va < VM_MIN_KERNEL_ADDRESS || va >= virtual_end)
			panic("pmap_extract");
#endif
		pa = kvtopte(va)->pt_entry & PG_FRAME;
	} else {
		register pt_entry_t *pte;

		if (!(pte = pmap_segmap(pmap, va)))
			pa = 0;
		else {
			pte += (va >> PGSHIFT) & (NPTEPG - 1);
			pa = pte->pt_entry & PG_FRAME;
		}
	}
	if (pa)
		pa |= va & PGOFSET;
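	/*
	 * A result of 0 doubles as "no mapping here", so a mapping of
	 * physical page zero, if one ever existed, would be
	 * indistinguishable from a failed lookup.
	 */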

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_extract: pa %x\n", pa);
#endif
	return (pa);
}

/*
 *	Copy the range specified by src_addr/len
 *	from the source map to the range dst_addr/len
 *	in the destination map.
 *
 *	This routine is only advisory and need not do anything.
 */
void
pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
	pmap_t dst_pmap;
	pmap_t src_pmap;
	vm_offset_t dst_addr;
	vm_size_t len;
	vm_offset_t src_addr;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy(%x, %x, %x, %x, %x)\n",
		       dst_pmap, src_pmap, dst_addr, len, src_addr);
#endif
}

/*
 *	Require that all active physical maps contain no
 *	incorrect entries NOW.  [This update includes
 *	forcing updates of any address map caching.]
 *
 *	Generally used to ensure that a thread about
 *	to run will see a semantically correct world.
 */
void
pmap_update()
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_update()\n");
#endif
}

/*
 *	Routine:	pmap_collect
 *	Function:
 *		Garbage collects the physical map system for
 *		pages which are no longer used.
 *		Success need not be guaranteed -- that is, there
 *		may well be pages which are not referenced, but
 *		others may be collected.
 *	Usage:
 *		Called by the pageout daemon when pages are scarce.
 */
void
pmap_collect(pmap)
	pmap_t pmap;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_collect(%x)\n", pmap);
#endif
}

/*
 *	pmap_zero_page zeros the specified (machine independent)
 *	page.
 */
void
pmap_zero_page(phys)
	vm_offset_t phys;
{
	register int *p, *end;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_zero_page(%x)\n", phys);
#endif
	p = (int *)MACH_PHYS_TO_CACHED(phys);
	end = p + PAGE_SIZE / sizeof(int);
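	/*
	 * The page is zeroed through its cached kseg0 address, so no
	 * temporary mapping is needed; the loop is unrolled four words at
	 * a time, as is the copy loop in pmap_copy_page() below.
	 */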
	do {
		p[0] = 0;
		p[1] = 0;
		p[2] = 0;
		p[3] = 0;
		p += 4;
	} while (p != end);
}

/*
 *	pmap_copy_page copies the specified (machine independent)
 *	page.
 */
void
pmap_copy_page(src, dst)
	vm_offset_t src, dst;
{
	register int *s, *d, *end;
	register int tmp0, tmp1, tmp2, tmp3;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy_page(%x, %x)\n", src, dst);
#endif
	s = (int *)MACH_PHYS_TO_CACHED(src);
	d = (int *)MACH_PHYS_TO_CACHED(dst);
	end = s + PAGE_SIZE / sizeof(int);
	do {
		tmp0 = s[0];
		tmp1 = s[1];
		tmp2 = s[2];
		tmp3 = s[3];
		d[0] = tmp0;
		d[1] = tmp1;
		d[2] = tmp2;
		d[3] = tmp3;
		s += 4;
		d += 4;
	} while (s != end);
}

/*
 *	Routine:	pmap_pageable
 *	Function:
 *		Make the specified pages (by pmap, offset)
 *		pageable (or not) as requested.
 *
 *		A page which is not pageable may not take
 *		a fault; therefore, its page table entry
 *		must remain valid for the duration.
 *
 *		This routine is merely advisory; pmap_enter
 *		will specify that these pages are to be wired
 *		down (or not) as appropriate.
 */
void
pmap_pageable(pmap, sva, eva, pageable)
	pmap_t		pmap;
	vm_offset_t	sva, eva;
	boolean_t	pageable;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_pageable(%x, %x, %x, %x)\n",
		       pmap, sva, eva, pageable);
#endif
}

/*
 *	Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(pa)
	vm_offset_t pa;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_modify(%x)\n", pa);
#endif
#ifdef ATTR
	pmap_attributes[atop(pa)] &= ~PMAP_ATTR_MOD;
#endif
}

/*
 *	pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(pa)
	vm_offset_t pa;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_reference(%x)\n", pa);
#endif
#ifdef ATTR
	pmap_attributes[atop(pa)] &= ~PMAP_ATTR_REF;
#endif
}

/*
 *	pmap_is_referenced:
 *
 *	Return whether or not the specified physical page is referenced
 *	by any physical maps.
 */
boolean_t
pmap_is_referenced(pa)
	vm_offset_t pa;
{
#ifdef ATTR
	return (pmap_attributes[atop(pa)] & PMAP_ATTR_REF);
#else
	return (FALSE);
#endif
}

/*
 *	pmap_is_modified:
 *
 *	Return whether or not the specified physical page is modified
 *	by any physical maps.
 */
boolean_t
pmap_is_modified(pa)
	vm_offset_t pa;
{
#ifdef ATTR
	return (pmap_attributes[atop(pa)] & PMAP_ATTR_MOD);
#else
	return (FALSE);
#endif
}

vm_offset_t
pmap_phys_address(ppn)
	int ppn;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_phys_address(%x)\n", ppn);
#endif
	return (pmax_ptob(ppn));
}

/*
 * Miscellaneous support routines
 */

/*
 * Allocate a hardware PID and return it.
 * It takes about as much time, or more, to search the TLB for a
 * specific PID and flush those entries as it does to flush the entire TLB.
 * Therefore, when we allocate a new PID, we just take the next number. When
 * we run out of numbers, we flush the TLB, increment the generation count
 * and start over. PID zero is reserved for kernel use.
 * This is called only by switch().
 */
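/*
 * pm_tlbgen records the generation in which pm_tlbpid was handed out.
 * After a wraparound flush bumps tlbpid_gen, no pmap's pm_tlbgen matches
 * any longer, so stale PIDs are detected lazily here and the per-address
 * TLB operations elsewhere in this file are simply skipped for such pmaps.
 */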
int
pmap_alloc_tlbpid(p)
	register struct proc *p;
{
	register pmap_t pmap;
	register int id;

	pmap = &p->p_vmspace->vm_pmap;
	if (pmap->pm_tlbgen != tlbpid_gen) {
		id = tlbpid_cnt;
		if (id == VMMACH_NUM_PIDS) {
			MachTLBFlush();
			/* reserve tlbpid_gen == 0 to always mean invalid */
			if (++tlbpid_gen == 0)
				tlbpid_gen = 1;
			id = 1;
		}
		tlbpid_cnt = id + 1;
		pmap->pm_tlbpid = id;
		pmap->pm_tlbgen = tlbpid_gen;
	} else
		id = pmap->pm_tlbpid;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_TLBPID)) {
		if (curproc)
			printf("pmap_alloc_tlbpid: curproc %d '%s' ",
				curproc->p_pid, curproc->p_comm);
		else
			printf("pmap_alloc_tlbpid: curproc <none> ");
		printf("segtab %x tlbpid %d pid %d '%s'\n",
			pmap->pm_segtab, id, p->p_pid, p->p_comm);
	}
#endif
	return (id);
}

/*
 * Remove a physical to virtual address translation.
 */
void
pmap_remove_pv(pmap, va, pa)
	pmap_t pmap;
	vm_offset_t va, pa;
{
	register pv_entry_t pv, npv;
	int s;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_PVENTRY))
		printf("pmap_remove_pv(%x, %x, %x)\n", pmap, va, pa);
#endif
	/*
	 * Remove page from the PV table (raise IPL since we
	 * may be called at interrupt time).
	 */
	if (!IS_VM_PHYSADDR(pa))
		return;
	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * If it is the first entry on the list, it is actually
	 * in the header and we must copy the following entry up
	 * to the header.  Otherwise we must search the list for
	 * the entry.  In either case we free the now unused entry.
	 */
	if (pmap == pv->pv_pmap && va == pv->pv_va) {
		npv = pv->pv_next;
		if (npv) {
			*pv = *npv;
			free((caddr_t)npv, M_VMPVENT);
		} else
			pv->pv_pmap = NULL;
#ifdef DEBUG
		remove_stats.pvfirst++;
#endif
	} else {
		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
#ifdef DEBUG
			remove_stats.pvsearch++;
#endif
			if (pmap == npv->pv_pmap && va == npv->pv_va)
				goto fnd;
		}
#ifdef DIAGNOSTIC
		printf("pmap_remove_pv(%x, %x, %x) not found\n", pmap, va, pa);
		panic("pmap_remove_pv");
#endif
	fnd:
		pv->pv_next = npv->pv_next;
		free((caddr_t)npv, M_VMPVENT);
	}
	splx(s);
}

/*
 *	vm_page_alloc1:
 *
 *	Allocate and return a memory cell with no associated object.
 */
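/*
 * vm_page_alloc1() and vm_page_free1() are private variants of the VM
 * page allocator used for segment tables and PTE pages; they take pages
 * straight from the free queue without attaching them to any VM object,
 * which the generic vm_page_alloc()/vm_page_free() interfaces presumably
 * require.
 */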
vm_page_t
vm_page_alloc1()
{
	register vm_page_t	mem;
	int		spl;

	spl = splimp();				/* XXX */
	simple_lock(&vm_page_queue_free_lock);
	if (vm_page_queue_free.tqh_first == NULL) {
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
		return (NULL);
	}

	mem = vm_page_queue_free.tqh_first;
	TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);

	cnt.v_free_count--;
	simple_unlock(&vm_page_queue_free_lock);
	splx(spl);

	mem->flags = PG_BUSY | PG_CLEAN | PG_FAKE;
	mem->wire_count = 0;

	/*
	 *	Decide if we should poke the pageout daemon.
	 *	We do this if the free count is less than the low
	 *	water mark, or if the free count is less than the high
	 *	water mark (but above the low water mark) and the inactive
	 *	count is less than its target.
	 *
	 *	We don't have the counts locked ... if they change a little,
	 *	it doesn't really matter.
	 */

	if (cnt.v_free_count < cnt.v_free_min ||
	    (cnt.v_free_count < cnt.v_free_target &&
	     cnt.v_inactive_count < cnt.v_inactive_target))
		thread_wakeup(&vm_pages_needed);
	return (mem);
}

/*
 *	vm_page_free1:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void
vm_page_free1(mem)
	register vm_page_t	mem;
{

	if (mem->flags & PG_ACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
		mem->flags &= ~PG_ACTIVE;
		cnt.v_active_count--;
	}

	if (mem->flags & PG_INACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
		mem->flags &= ~PG_INACTIVE;
		cnt.v_inactive_count--;
	}

	if (!(mem->flags & PG_FICTITIOUS)) {
		int	spl;

		spl = splimp();
		simple_lock(&vm_page_queue_free_lock);
		TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);

		cnt.v_free_count++;
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
	}
}
1457