xref: /original-bsd/sys/pmax/pmax/pmap.c (revision e59fb703)
1 /*
2  * Copyright (c) 1992 The Regents of the University of California.
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * the Systems Programming Group of the University of Utah Computer
7  * Science Department and Ralph Campbell.
8  *
9  * %sccs.include.redist.c%
10  *
11  *	@(#)pmap.c	7.1 (Berkeley) 01/07/92
12  */
13 
14 /*
15  *	Manages physical address maps.
16  *
17  *	In addition to hardware address maps, this
18  *	module is called upon to provide software-use-only
19  *	maps which may or may not be stored in the same
20  *	form as hardware maps.  These pseudo-maps are
21  *	used to store intermediate results from copy
22  *	operations to and from address spaces.
23  *
24  *	Since the information managed by this module is
25  *	also stored by the logical address mapping module,
26  *	this module may throw away valid virtual-to-physical
27  *	mappings at almost any time.  However, invalidations
28  *	of virtual-to-physical mappings must be done as
29  *	requested.
30  *
31  *	In order to cope with hardware architectures which
32  *	make virtual-to-physical map invalidates expensive,
33  *	this module may delay invalidate or reduced protection
34  *	operations until such time as they are actually
35  *	necessary.  This module is given full information as
36  *	to which processors are currently using which maps,
37  *	and to when physical maps must be made correct.
38  */
39 
40 #include "param.h"
41 #include "proc.h"
42 #include "malloc.h"
43 #include "user.h"
44 
45 #include "vm/vm.h"
46 #include "vm/vm_kern.h"
47 #include "vm/vm_page.h"
48 
49 #include "../include/machConst.h"
50 #include "pte.h"
51 
52 /*
53  * For each vm_page_t, there is a list of all currently valid virtual
54  * mappings of that page.  An entry is a pv_entry_t; the list is pv_table.
55  * XXX really should do this as a part of the higher level code.
56  */
57 typedef struct pv_entry {
58 	struct pv_entry	*pv_next;	/* next pv_entry */
59 	struct pmap	*pv_pmap;	/* pmap where mapping lies */
60 	vm_offset_t	pv_va;		/* virtual address for mapping */
61 	int		pv_flags;	/* flags */
62 } *pv_entry_t;
63 
64 pv_entry_t	pv_table;	/* array of entries, one per page */
65 extern void	pmap_remove_pv();
66 
67 #define pa_index(pa)		atop((pa) - first_phys_addr)
68 #define pa_to_pvh(pa)		(&pv_table[pa_index(pa)])
69 
70 #ifdef DEBUG
71 struct {
72 	int kernel;	/* entering kernel mapping */
73 	int user;	/* entering user mapping */
74 	int ptpneeded;	/* needed to allocate a PT page */
75 	int pwchange;	/* no mapping change, just wiring or protection */
76 	int wchange;	/* no mapping change, just wiring */
77 	int mchange;	/* was mapped but mapping to different page */
78 	int managed;	/* a managed page */
79 	int firstpv;	/* first mapping for this PA */
80 	int secondpv;	/* second mapping for this PA */
81 	int ci;		/* cache inhibited */
82 	int unmanaged;	/* not a managed page */
83 	int flushes;	/* cache flushes */
84 	int cachehit;	/* new entry forced valid entry out */
85 } enter_stats;
86 struct {
87 	int calls;
88 	int removes;
89 	int flushes;
90 	int pidflushes;	/* HW pid stolen */
91 	int pvfirst;
92 	int pvsearch;
93 } remove_stats;
94 
95 int pmapdebug;
96 #define PDB_FOLLOW	0x0001
97 #define PDB_INIT	0x0002
98 #define PDB_ENTER	0x0004
99 #define PDB_REMOVE	0x0008
100 #define PDB_CREATE	0x0010
101 #define PDB_PTPAGE	0x0020
102 #define PDB_CACHE	0x0040
103 #define PDB_BITS	0x0080
104 #define PDB_COLLECT	0x0100
105 #define PDB_PROTECT	0x0200
106 #define PDB_TLBPID	0x0400
107 #define PDB_PARANOIA	0x2000
108 #define PDB_WIRING	0x4000
109 #define PDB_PVDUMP	0x8000
110 
111 #endif /* DEBUG */
112 
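/*
 * Hardware PIDs 0 and 1 start out marked busy: PID 1 is preallocated
 * for proc0's pmap (see pmap_pinit) and PID 0 is never handed out
 * (the kernel's wired entries in pmap_bootstrap carry a zero PID tag).
 */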
113 u_int	whichpids[2] = {	/* bit mask of hardware PIDs in use */
114 	3, 0
115 };
116 
117 struct pmap	kernel_pmap_store;
118 pmap_t		kernel_pmap;
119 pmap_t		cur_pmap;	/* current pmap mapped in hardware */
120 
121 vm_offset_t    	avail_start;	/* PA of first available physical page */
122 vm_offset_t	avail_end;	/* PA of last available physical page */
123 vm_size_t	mem_size;	/* memory size in bytes */
124 vm_offset_t	virtual_avail;  /* VA of first avail page (after kernel bss)*/
125 vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
126 int		pmaxpagesperpage;	/* PAGE_SIZE / NBPG */
127 #ifdef ATTR
128 char		*pmap_attributes;	/* reference and modify bits */
129 #endif
130 pmap_hash_t	zero_pmap_hash;		/* empty TLB hash table for init */
131 
132 /*
133  *	Bootstrap the system enough to run with virtual memory.
134  */
135 void
136 pmap_bootstrap(firstaddr)
137 	vm_offset_t firstaddr;
138 {
139 	register int i;
140 	vm_offset_t start = firstaddr;
141 	extern int maxmem, physmem;
142 
143 	/*
144 	 * Allocate a TLB hash table for the kernel.
145 	 * This could be a KSEG0 address and thus save TLB entries, but
146 	 * it's faster and simpler in assembly language to have a
147 	 * fixed address that can be accessed with a 16-bit signed offset.
148 	 * Note: the kernel pm_hash field is null; user pm_hash fields are
149 	 * either their own table or zero_pmap_hash.
150 	 */
151 	kernel_pmap_store.pm_hash = (pmap_hash_t)0;
152 	for (i = 0; i < PMAP_HASH_KPAGES; i++) {
153 		MachTLBWriteIndexed(i + UPAGES + PMAP_HASH_UPAGES,
154 			PMAP_HASH_KADDR + (i << PGSHIFT),
155 			firstaddr | PG_V | PG_M | PG_G);
156 		firstaddr += NBPG;
157 	}
158 
159 	/*
160 	 * Allocate an empty TLB hash table for initial pmap's.
161 	 */
162 	zero_pmap_hash = (pmap_hash_t)firstaddr;
163 	firstaddr += PMAP_HASH_UPAGES * NBPG;
164 
165 	/* init proc[0]'s pmap hash table */
166 	for (i = 0; i < PMAP_HASH_UPAGES; i++) {
167 		kernel_pmap_store.pm_hash_ptes[i] =
168 			((u_int)zero_pmap_hash + (i << PGSHIFT)) | PG_V | PG_RO;
169 		MachTLBWriteIndexed(i + UPAGES,
170 			(PMAP_HASH_UADDR + (i << PGSHIFT)) |
171 				(1 << VMMACH_TLB_PID_SHIFT),
172 			kernel_pmap_store.pm_hash_ptes[i]);
173 	}
174 
175 	/*
176 	 * Allocate memory for pv_table.
177 	 * This will allocate more entries than we really need.
178 	 * We should do this in pmap_init when we know the actual
179 	 * phys_start and phys_end, but it's better to use phys addresses
180 	 * rather than kernel virtual addresses mapped through the TLB.
181 	 */
182 	i = (maxmem - pmax_btop(firstaddr)) * sizeof(struct pv_entry);
183 	i = pmax_round_page(i);
184 	pv_table = (pv_entry_t)firstaddr;
185 	firstaddr += i;
186 
187 	/*
188 	 * Clear allocated memory.
189 	 */
190 	bzero((caddr_t)start, firstaddr - start);
191 
192 	avail_start = firstaddr;
193 	avail_end = pmax_ptob(maxmem);
194 	mem_size = avail_end - avail_start;
195 
196 	virtual_avail = VM_MIN_KERNEL_ADDRESS;
197 	virtual_end = VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES * NPTEPG * NBPG;
198 	/* XXX need to decide how to set cnt.v_page_size */
199 	pmaxpagesperpage = 1;
200 
201 	/*
202 	 * The kernel's pmap is statically allocated so we don't
203 	 * have to use pmap_create, which is unlikely to work
204 	 * correctly at this point in the boot sequence.
205 	 */
206 	kernel_pmap = cur_pmap = &kernel_pmap_store;
207 	simple_lock_init(&kernel_pmap->pm_lock);
208 	kernel_pmap->pm_count = 1;
209 }
210 
211 /*
212  * Bootstrap memory allocator. This function allows for early dynamic
213  * memory allocation until the virtual memory system has been bootstrapped.
214  * After that point, either kmem_alloc or malloc should be used. This
215  * function works by stealing pages from the (to be) managed page pool,
216  * stealing virtual address space, then mapping the pages and zeroing them.
217  *
218  * It should be used from pmap_bootstrap until vm_page_startup; after that
219  * it cannot be used and will panic if tried.  Note that this
220  * memory will never be freed, and in essence it is wired down.
221  */
222 void *
223 pmap_bootstrap_alloc(size)
224 	int size;
225 {
226 	vm_offset_t val;
227 	extern boolean_t vm_page_startup_initialized;
228 
229 	if (vm_page_startup_initialized)
230 		panic("pmap_bootstrap_alloc: called after startup initialized");
231 
232 	val = avail_start;
233 	size = round_page(size);
234 	avail_start += size;
235 
236 	blkclr((caddr_t) val, size);
237 	return ((void *) val);
238 }
239 
240 /*
241  *	Initialize the pmap module.
242  *	Called by vm_init, to initialize any structures that the pmap
243  *	system needs to map virtual memory.
244  */
245 void
246 pmap_init(phys_start, phys_end)
247 	vm_offset_t phys_start, phys_end;
248 {
249 
250 #ifdef DEBUG
251 	if (pmapdebug & PDB_FOLLOW)
252 		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
253 #endif
254 }
255 
256 /*
257  *	Used to map a range of physical addresses into kernel
258  *	virtual address space.
259  *
260  *	This routine should only be called by vm_page_startup()
261  *	with KSEG0 addresses.
262  */
263 vm_offset_t
264 pmap_map(virt, start, end, prot)
265 	vm_offset_t virt;
266 	vm_offset_t start;
267 	vm_offset_t end;
268 	int prot;
269 {
270 
271 #ifdef DEBUG
272 	if (pmapdebug & PDB_FOLLOW)
273 		printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
274 #endif
275 
276 	return(round_page(end));
277 }
278 
279 /*
280  *	Create and return a physical map.
281  *
282  *	If the size specified for the map
283  *	is zero, the map is an actual physical
284  *	map, and may be referenced by the
285  *	hardware.
286  *
287  *	If the size specified is non-zero,
288  *	the map will be used in software only, and
289  *	is bounded by that size.
290  */
291 pmap_t
292 pmap_create(size)
293 	vm_size_t size;
294 {
295 	register pmap_t pmap;
296 
297 #ifdef DEBUG
298 	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
299 		printf("pmap_create(%x)\n", size);
300 #endif
301 	/*
302 	 * Software use map does not need a pmap
303 	 */
304 	if (size)
305 		return(NULL);
306 
307 	printf("pmap_create(%x) XXX\n", size); /* XXX */
308 	/* XXX: is it ok to wait here? */
309 	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
310 #ifdef notifwewait
311 	if (pmap == NULL)
312 		panic("pmap_create: cannot allocate a pmap");
313 #endif
314 	bzero(pmap, sizeof(*pmap));
315 	pmap_pinit(pmap);
316 	return (pmap);
317 }
318 
319 /*
320  * Initialize a preallocated and zeroed pmap structure,
321  * such as one in a vmspace structure.
322  */
323 void
324 pmap_pinit(pmap)
325 	register struct pmap *pmap;
326 {
327 	register int i;
328 	extern struct vmspace vmspace0;
329 
330 #ifdef DEBUG
331 	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
332 		printf("pmap_pinit(%x)\n", pmap);
333 #endif
334 	simple_lock_init(&pmap->pm_lock);
335 	pmap->pm_count = 1;
336 	pmap->pm_flags = 0;
337 	pmap->pm_hash = zero_pmap_hash;
338 	for (i = 0; i < PMAP_HASH_UPAGES; i++)
339 		pmap->pm_hash_ptes[i] =
340 			((u_int)zero_pmap_hash + (i << PGSHIFT)) | PG_V | PG_RO;
341 	if (pmap == &vmspace0.vm_pmap)
342 		pmap->pm_tlbpid = 1;	/* preallocated in mach_init() */
343 	else
344 		pmap->pm_tlbpid = -1;	/* none allocated yet */
345 }
346 
347 /*
348  *	Retire the given physical map from service.
349  *	Should only be called if the map contains
350  *	no valid mappings.
351  */
352 void
353 pmap_destroy(pmap)
354 	register pmap_t pmap;
355 {
356 	int count;
357 
358 #ifdef DEBUG
359 	if (pmapdebug & PDB_FOLLOW)
360 		printf("pmap_destroy(%x)\n", pmap);
361 #endif
362 	if (pmap == NULL)
363 		return;
364 
365 	printf("pmap_destroy(%x) XXX\n", pmap); /* XXX */
366 	simple_lock(&pmap->pm_lock);
367 	count = --pmap->pm_count;
368 	simple_unlock(&pmap->pm_lock);
369 	if (count == 0) {
370 		pmap_release(pmap);
371 		free((caddr_t)pmap, M_VMPMAP);
372 	}
373 }
374 
375 /*
376  * Release any resources held by the given physical map.
377  * Called when a pmap initialized by pmap_pinit is being released.
378  * Should only be called if the map contains no valid mappings.
379  */
380 void
381 pmap_release(pmap)
382 	register pmap_t pmap;
383 {
384 	register int id;
385 #ifdef DIAGNOSTIC
386 	register int i;
387 #endif
388 
389 #ifdef DEBUG
390 	if (pmapdebug & PDB_FOLLOW)
391 		printf("pmap_release(%x)\n", pmap);
392 #endif
393 
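	/*
	 * Give back the private TLB hash table, if any, and fall back
	 * to the shared empty table.
	 */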
394 	if (pmap->pm_hash && pmap->pm_hash != zero_pmap_hash) {
395 		kmem_free(kernel_map, (vm_offset_t)pmap->pm_hash,
396 			PMAP_HASH_SIZE);
397 		pmap->pm_hash = zero_pmap_hash;
398 	}
399 	if ((id = pmap->pm_tlbpid) < 0)
400 		return;
401 #ifdef DIAGNOSTIC
402 	if (!(whichpids[id >> 5] & (1 << (id & 0x1F))))
403 		panic("pmap_release: id free");
404 #endif
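	/*
	 * Flush this PID's entries from the TLB and mark the PID free:
	 * id >> 5 selects the word in whichpids, id & 0x1F the bit.
	 */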
405 	MachTLBFlushPID(id);
406 	whichpids[id >> 5] &= ~(1 << (id & 0x1F));
407 	pmap->pm_flags &= ~PM_MODIFIED;
408 	pmap->pm_tlbpid = -1;
409 	if (pmap == cur_pmap)
410 		cur_pmap = (pmap_t)0;
411 #ifdef DIAGNOSTIC
412 	/* invalidate user PTE cache */
413 	for (i = 0; i < PMAP_HASH_UPAGES; i++)
414 		MachTLBWriteIndexed(i + UPAGES, MACH_RESERVED_ADDR, 0);
415 #endif
416 }
417 
418 /*
419  *	Add a reference to the specified pmap.
420  */
421 void
422 pmap_reference(pmap)
423 	pmap_t pmap;
424 {
425 
426 #ifdef DEBUG
427 	if (pmapdebug & PDB_FOLLOW)
428 		printf("pmap_reference(%x)\n", pmap);
429 #endif
430 	if (pmap != NULL) {
431 		simple_lock(&pmap->pm_lock);
432 		pmap->pm_count++;
433 		simple_unlock(&pmap->pm_lock);
434 	}
435 }
436 
437 /*
438  *	Remove the given range of addresses from the specified map.
439  *
440  *	It is assumed that the start and end are properly
441  *	rounded to the page size.
442  */
443 void
444 pmap_remove(pmap, sva, eva)
445 	register pmap_t pmap;
446 	vm_offset_t sva, eva;
447 {
448 	register vm_offset_t va;
449 	register pv_entry_t pv, npv;
450 	pmap_hash_t hp;
451 	unsigned entry;
452 
453 #ifdef DEBUG
454 	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
455 		printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
456 	remove_stats.calls++;
457 #endif
458 	if (pmap == NULL)
459 		return;
460 
461 	/* anything in the cache? */
462 	if (pmap->pm_tlbpid < 0 || pmap->pm_hash == zero_pmap_hash)
463 		return;
464 
465 	if (!pmap->pm_hash) {
466 		register pt_entry_t *pte;
467 
468 		/* remove entries from kernel pmap */
469 		pte = kvtopte(sva);
470 		for (va = sva; va < eva; va += NBPG, pte++) {
471 			entry = pte->pt_entry;
472 			if (!(entry & PG_V))
473 				continue;
474 			if (entry & PG_WIRED)
475 				pmap->pm_stats.wired_count--;
476 			pmap->pm_stats.resident_count--;
477 			pmap_remove_pv(pmap, va, entry & PG_FRAME);
478 #ifdef ATTR
479 			pmap_attributes[atop(entry - KERNBASE)] = 0;
480 #endif
481 			pte->pt_entry = PG_NV;
482 			/*
483 			 * Flush the TLB for the given address.
484 			 */
485 			MachTLBFlushAddr(va);
486 #ifdef DEBUG
487 			remove_stats.flushes++;
488 #endif
489 		}
490 		return;
491 	}
492 
493 	va = sva | (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
494 	eva |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
495 	/*
496 	 * If we are not in the current address space, just flush the
497 	 * software cache and not the hardware.
498 	 */
499 	if (pmap != cur_pmap) {
500 		for (; va < eva; va += NBPG) {
501 			hp = &pmap->pm_hash[PMAP_HASH(va)];
502 			if (hp->high != va)
503 				continue;
504 
505 			hp->high = 0;
506 			entry = hp->low;
507 			if (entry & PG_WIRED)
508 				pmap->pm_stats.wired_count--;
509 			pmap->pm_stats.resident_count--;
510 			pmap_remove_pv(pmap, va & PG_FRAME, entry & PG_FRAME);
511 #ifdef ATTR
512 			pmap_attributes[atop(entry - KERNBASE)] = 0;
513 #endif
514 			pmap->pm_flags |= PM_MODIFIED;
515 #ifdef DEBUG
516 			remove_stats.removes++;
517 #endif
518 		}
519 		return;
520 	}
521 
522 	for (; va < eva; va += NBPG) {
523 		hp = &pmap->pm_hash[PMAP_HASH(va)];
524 		if (hp->high != va)
525 			continue;
526 
527 		hp->high = 0;
528 		entry = hp->low;
529 		if (entry & PG_WIRED)
530 			pmap->pm_stats.wired_count--;
531 		pmap->pm_stats.resident_count--;
532 		pmap_remove_pv(pmap, va & PG_FRAME, entry & PG_FRAME);
533 #ifdef ATTR
534 		pmap_attributes[atop(entry - KERNBASE)] = 0;
535 #endif
536 		/*
537 		 * Flush the TLB for the given address.
538 		 */
539 		MachTLBFlushAddr(va);
540 #ifdef DEBUG
541 		remove_stats.flushes++;
542 #endif
543 	}
544 }
545 
546 /*
547  *	pmap_page_protect:
548  *
549  *	Lower the permission for all mappings to a given page.
550  */
551 void
552 pmap_page_protect(pa, prot)
553 	vm_offset_t pa;
554 	vm_prot_t prot;
555 {
556 	register pv_entry_t pv;
557 	register vm_offset_t va;
558 	int s;
559 
560 #ifdef DEBUG
561 	if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
562 	    prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))
563 		printf("pmap_page_protect(%x, %x)\n", pa, prot);
564 #endif
565 	if (!IS_VM_PHYSADDR(pa))
566 		return;
567 
568 	switch (prot) {
569 	case VM_PROT_ALL:
570 		break;
571 
572 	/* copy_on_write */
573 	case VM_PROT_READ:
574 	case VM_PROT_READ|VM_PROT_EXECUTE:
575 		pv = pa_to_pvh(pa);
576 		s = splimp();
577 		/*
578 		 * Loop over all current mappings, setting/clearing as appropriate.
579 		 */
580 		if (pv->pv_pmap != NULL) {
581 			for (; pv; pv = pv->pv_next) {
582 				extern vm_offset_t pager_sva, pager_eva;
583 				va = pv->pv_va;
584 
585 				/*
586 				 * XXX don't write protect pager mappings
587 				 */
588 				if (va >= pager_sva && va < pager_eva)
589 					continue;
590 				pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
591 					prot);
592 			}
593 		}
594 		splx(s);
595 		break;
596 
597 	/* remove_all */
598 	default:
599 		pv = pa_to_pvh(pa);
600 		s = splimp();
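		/*
		 * pmap_remove strips the pv entry for each mapping it tears
		 * down, so the list header empties and the loop terminates.
		 */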
601 		while (pv->pv_pmap != NULL) {
602 			pmap_remove(pv->pv_pmap, pv->pv_va,
603 				    pv->pv_va + PAGE_SIZE);
604 		}
605 		splx(s);
606 	}
607 }
608 
609 /*
610  *	Set the physical protection on the
611  *	specified range of this map as requested.
612  */
613 void
614 pmap_protect(pmap, sva, eva, prot)
615 	register pmap_t pmap;
616 	vm_offset_t sva, eva;
617 	vm_prot_t prot;
618 {
619 	register vm_offset_t va;
620 	pmap_hash_t hp;
621 	u_int p;
622 
623 #ifdef DEBUG
624 	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
625 		printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
626 #endif
627 	if (pmap == NULL)
628 		return;
629 
630 	/* anything in the software cache? */
631 	if (pmap->pm_tlbpid < 0 || pmap->pm_hash == zero_pmap_hash)
632 		return;
633 
634 	if (!(prot & VM_PROT_READ)) {
635 		pmap_remove(pmap, sva, eva);
636 		return;
637 	}
638 
639 	if (!pmap->pm_hash) {
640 		register pt_entry_t *pte;
641 
642 		/*
643 		 * Change entries in kernel pmap.
644 		 * This will trap if the page is writeable (in order to set
645 		 * the dirty bit) even if the dirty bit is already set. The
646 		 * optimization isn't worth the effort since this code isn't
647 		 * executed much. The common case is to make a user page
648 		 * read-only.
649 		 */
650 		p = (prot & VM_PROT_WRITE) ? PG_RW : PG_RO;
651 		pte = kvtopte(sva);
652 		for (va = sva; va < eva; va += NBPG, pte++) {
653 			if (!(pte->pt_entry & PG_V))
654 				continue;
655 			pte->pt_entry = (pte->pt_entry & ~(PG_M | PG_RO)) | p;
656 			/*
657 			 * Update the TLB if the given address is in the cache.
658 			 */
659 			MachTLBUpdate(va, pte->pt_entry);
660 		}
661 		return;
662 	}
663 
664 	p = (prot & VM_PROT_WRITE) ? PG_RW : PG_RO;
665 	va = sva | (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
666 	eva |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
667 	/*
668 	 * If we are not in the current address space, just flush the
669 	 * software cache and not the hardware.
670 	 */
671 	if (pmap != cur_pmap) {
672 		for (; va < eva; va += NBPG) {
673 			hp = &pmap->pm_hash[PMAP_HASH(va)];
674 			if (hp->high != va)
675 				continue;
676 
677 			hp->low = (hp->low & ~(PG_M | PG_RO)) | p;
678 			pmap->pm_flags |= PM_MODIFIED;
679 		}
680 		return;
681 	}
682 
683 	for (; va < eva; va += NBPG) {
684 		hp = &pmap->pm_hash[PMAP_HASH(va)];
685 		if (hp->high != va)
686 			continue;
687 
688 		hp->low = (hp->low & ~(PG_M | PG_RO)) | p;
689 		/*
690 		 * Update the TLB if the given address is in the cache.
691 		 */
692 		MachTLBUpdate(hp->high, hp->low);
693 	}
694 }
695 
696 /*
697  *	Insert the given physical page (p) at
698  *	the specified virtual address (v) in the
699  *	target physical map with the protection requested.
700  *
701  *	If specified, the page will be wired down, meaning
702  *	that the related pte can not be reclaimed.
703  *	that the related pte cannot be reclaimed.
704  *	NB:  This is the only routine which MAY NOT lazy-evaluate
705  *	or lose information.  That is, this routine must actually
706  *	insert this page into the given map NOW.
707  */
708 void
709 pmap_enter(pmap, va, pa, prot, wired)
710 	register pmap_t pmap;
711 	vm_offset_t va;
712 	register vm_offset_t pa;
713 	vm_prot_t prot;
714 	boolean_t wired;
715 {
716 	register pmap_hash_t hp;
717 	register u_int npte;
718 	register int i;
719 
720 #ifdef DEBUG
721 	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
722 		printf("pmap_enter(%x, %x, %x, %x, %x)\n",
723 		       pmap, va, pa, prot, wired);
724 #endif
725 #ifdef DIAGNOSTIC
726 	if (!pmap)
727 		panic("pmap_enter: pmap");
728 	if (pmap->pm_tlbpid < 0)
729 		panic("pmap_enter: tlbpid");
730 	if (pmap == kernel_pmap) {
731 		enter_stats.kernel++;
732 		if ((va & 0xE0000000) != 0xC0000000)
733 			panic("pmap_enter: kva");
734 	} else {
735 		enter_stats.user++;
736 		if (va & 0x80000000)
737 			panic("pmap_enter: uva");
738 	}
739 	if (!(prot & VM_PROT_READ))
740 		panic("pmap_enter: prot");
741 #endif
742 
743 	/*
744 	 * See if we need to create a new TLB cache.
745 	 */
746 	if (pmap->pm_hash == zero_pmap_hash) {
747 		register vm_offset_t kva;
748 		register pt_entry_t *pte;
749 
750 		kva = kmem_alloc(kernel_map, PMAP_HASH_SIZE);
751 		pmap->pm_hash = (pmap_hash_t)kva;
752 
753 		/*
754 		 * Convert the kernel virtual address to a physical one
755 		 * and cache it in the pmap. Note: if the physical address
756 		 * can change (due to memory compaction in kmem_alloc?),
757 		 * we will have to update things.
758 		 */
759 		pte = kvtopte(kva);
760 		for (i = 0; i < PMAP_HASH_UPAGES; i++) {
761 			pmap->pm_hash_ptes[i] = pte->pt_entry & ~PG_G;
762 			pte++;
763 		}
764 
765 		/*
766 		 * Map in new TLB cache if it is current.
767 		 */
768 		if (pmap == cur_pmap) {
769 #ifdef DIAGNOSTIC
770 			if (pmap->pm_tlbpid < 0)
771 				panic("pmap_enter: tlbpid");
772 #endif
773 			for (i = 0; i < PMAP_HASH_UPAGES; i++) {
774 				MachTLBWriteIndexed(i + UPAGES,
775 					(PMAP_HASH_UADDR + (i << PGSHIFT)) |
776 						(pmap->pm_tlbpid  <<
777 						VMMACH_TLB_PID_SHIFT),
778 					pmap->pm_hash_ptes[i]);
779 			}
780 		}
781 #ifdef DIAGNOSTIC
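		/* sanity check: the newly allocated hash table must be zero-filled */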
782 		for (i = 0; i < PAGE_SIZE; i += sizeof(int), kva += sizeof(int))
783 			if (*(int *)kva != 0)
784 				panic("pmap_enter: *kva != 0");
785 #endif
786 	}
787 
788 	if (IS_VM_PHYSADDR(pa)) {
789 		register pv_entry_t pv, npv;
790 		int s;
791 
792 		if (!(prot & VM_PROT_WRITE))
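		/*
		 * Compute the protection/modify bits for the new entry:
		 * PG_RO for read-only mappings, PG_M when the page is
		 * writable and already known dirty (or a kernel write),
		 * and 0 for a writable page whose first write should trap
		 * so the modified state can be recorded.
		 */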
793 			npte = PG_RO;
794 		else {
795 			register vm_page_t mem;
796 
797 			mem = PHYS_TO_VM_PAGE(pa);
798 			if ((int)va < 0) {
799 				/*
800 				 * Don't bother to trap on kernel writes,
801 				 * just record page as dirty.
802 				 */
803 				npte = PG_M;
804 				mem->clean = FALSE;
805 			} else
806 #ifdef ATTR
807 				if ((pmap_attributes[atop(pa - KERNBASE)] &
808 				    PMAP_ATTR_MOD) || !mem->clean)
809 #else
810 				if (!mem->clean)
811 #endif
812 					npte = PG_M;
813 				else
814 					npte = 0;
815 		}
816 
817 #ifdef DEBUG
818 		enter_stats.managed++;
819 #endif
820 		/*
821 		 * Enter the pmap and virtual address into the
822 		 * physical to virtual map table.
823 		 */
824 		pv = pa_to_pvh(pa);
825 		s = splimp();
826 #ifdef DEBUG
827 		if (pmapdebug & PDB_ENTER)
828 			printf("pmap_enter: pv %x: was %x/%x/%x\n",
829 			       pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
830 #endif
831 		if (pv->pv_pmap == NULL) {
832 			/*
833 			 * No entries yet, use header as the first entry
834 			 */
835 #ifdef DEBUG
836 			enter_stats.firstpv++;
837 #endif
838 			pv->pv_va = va;
839 			pv->pv_pmap = pmap;
840 			pv->pv_next = NULL;
841 			pv->pv_flags = 0;
842 		} else {
843 			/*
844 			 * There is at least one other VA mapping this page.
845 			 * Place this entry after the header.
846 			 *
847 			 * Note: the entry may already be in the table if
848 			 * we are only changing the protection bits.
849 			 */
850 			for (npv = pv; npv; npv = npv->pv_next)
851 				if (pmap == npv->pv_pmap && va == npv->pv_va) {
852 #ifdef DIAGNOSTIC
853 				    if (!pmap->pm_hash) {
854 					unsigned entry;
855 
856 					entry = kvtopte(va)->pt_entry;
857 					if (!(entry & PG_V) ||
858 					    (entry & PG_FRAME) != pa)
859 			printf("found kva %x pa %x in pv_table but != %x\n",
860 				va, pa, entry);
861 				    } else {
862 					hp = &pmap->pm_hash[PMAP_HASH(va)];
863 					if (hp->high != (va |
864 					    (pmap->pm_tlbpid <<
865 					    VMMACH_TLB_PID_SHIFT)) ||
866 					    (hp->low & PG_FRAME) != pa)
867 			printf("found va %x pa %x in pv_table but != %x %x\n",
868 				va, pa, hp->high, hp->low);
869 				    }
870 #endif
871 					goto fnd;
872 				}
873 			/* can this cause us to recurse forever? */
874 			npv = (pv_entry_t)
875 				malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
876 			npv->pv_va = va;
877 			npv->pv_pmap = pmap;
878 			npv->pv_next = pv->pv_next;
879 			pv->pv_next = npv;
880 #ifdef DEBUG
881 			if (!npv->pv_next)
882 				enter_stats.secondpv++;
883 #endif
884 		fnd:
885 			;
886 		}
887 		splx(s);
888 	} else {
889 		/*
890 		 * Assumption: if it is not part of our managed memory
891 		 * then it must be device memory, which may be volatile.
892 		 */
893 #ifdef DEBUG
894 		enter_stats.unmanaged++;
895 #endif
896 		printf("pmap_enter: UNMANAGED ADDRESS va %x pa %x\n",
897 			va, pa); /* XXX */
898 		npte = (prot & VM_PROT_WRITE) ? PG_M : PG_RO;
899 	}
900 
901 	if (!pmap->pm_hash) {
902 		register pt_entry_t *pte;
903 
904 		/* enter entries into kernel pmap */
905 		pte = kvtopte(va);
906 		npte |= pa | PG_V | PG_G;
907 		if (wired) {
908 			pmap->pm_stats.wired_count += pmaxpagesperpage;
909 			npte |= PG_WIRED;
910 		}
911 		i = pmaxpagesperpage;
912 		do {
913 			if (!(pte->pt_entry & PG_V)) {
914 				pmap->pm_stats.resident_count++;
915 				MachTLBWriteRandom(va, npte);
916 			} else {
917 				/*
918 				 * Update the same virtual address entry.
919 				 */
920 				MachTLBUpdate(va, npte);
921 			}
922 			pte->pt_entry = npte;
923 			va += NBPG;
924 			npte += NBPG;
925 			pte++;
926 		} while (--i != 0);
927 		return;
928 	}
929 
930 	/*
931 	 * Now validate mapping with desired protection/wiring.
932 	 * Assume uniform modified and referenced status for all
933 	 * PMAX pages in a MACH page.
934 	 */
935 	npte |= pa | PG_V;
936 	if (wired) {
937 		pmap->pm_stats.wired_count += pmaxpagesperpage;
938 		npte |= PG_WIRED;
939 	}
940 #ifdef DEBUG
941 	if (pmapdebug & PDB_ENTER)
942 		printf("pmap_enter: new pte value %x\n", npte);
943 #endif
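	/*
	 * Hash entries are keyed on the VA with the TLB PID folded in
	 * (the same form the hardware EntryHi uses), so entries from
	 * different address spaces cannot collide.
	 */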
944 	va |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
945 	i = pmaxpagesperpage;
946 	do {
947 		hp = &pmap->pm_hash[PMAP_HASH(va)];
948 		if (!hp->high) {
949 			pmap->pm_stats.resident_count++;
950 			hp->high = va;
951 			hp->low = npte;
952 			MachTLBWriteRandom(va, npte);
953 		} else {
954 #ifdef DEBUG
955 			enter_stats.cachehit++;
956 #endif
957 			if (hp->high == va) {
958 				/*
959 				 * Update the same entry.
960 				 */
961 				hp->low = npte;
962 				MachTLBUpdate(va, npte);
963 			} else if (!(hp->low & PG_WIRED)) {
964 				MachTLBFlushAddr(hp->high);
965 				pmap_remove_pv(pmap, hp->high & PG_FRAME,
966 					hp->low & PG_FRAME);
967 				hp->high = va;
968 				hp->low = npte;
969 				MachTLBWriteRandom(va, npte);
970 			} else {
971 				/*
972 				 * Don't replace wired entries, just update
973 				 * the hardware TLB.
974 				 * Bug: routines to flush the TLB won't know
975 				 * that the entry is in the hardware.
976 				 */
977 				printf("pmap_enter: wired va %x %x\n", va,
978 					hp->low); /* XXX */
979 				panic("pmap_enter: wired"); /* XXX */
980 				MachTLBWriteRandom(va, npte);
981 			}
982 		}
983 		va += NBPG;
984 		npte += NBPG;
985 	} while (--i != 0);
986 }
987 
988 /*
989  *	Routine:	pmap_change_wiring
990  *	Function:	Change the wiring attribute for a map/virtual-address
991  *			pair.
992  *	In/out conditions:
993  *			The mapping must already exist in the pmap.
994  */
995 void
996 pmap_change_wiring(pmap, va, wired)
997 	register pmap_t	pmap;
998 	vm_offset_t va;
999 	boolean_t wired;
1000 {
1001 	register pmap_hash_t hp;
1002 	u_int p;
1003 	int i;
1004 
1005 #ifdef DEBUG
1006 	if (pmapdebug & PDB_FOLLOW)
1007 		printf("pmap_change_wiring(%x, %x, %x)\n", pmap, va, wired);
1008 #endif
1009 	if (pmap == NULL)
1010 		return;
1011 
1012 	p = wired ? PG_WIRED : 0;
1013 
1014 	/*
1015 	 * Don't need to flush the TLB since PG_WIRED is only in software.
1016 	 */
1017 	if (!pmap->pm_hash) {
1018 		register pt_entry_t *pte;
1019 
1020 		/* change entries in kernel pmap */
1021 		pte = kvtopte(va);
1022 		i = pmaxpagesperpage;
1023 		if (!(pte->pt_entry & PG_WIRED) && p)
1024 			pmap->pm_stats.wired_count += i;
1025 		else if ((pte->pt_entry & PG_WIRED) && !p)
1026 			pmap->pm_stats.wired_count -= i;
1027 		do {
1028 			/* skip invalid entries but always advance to the next pte */
1029 			if (pte->pt_entry & PG_V)
1030 				pte->pt_entry = (pte->pt_entry & ~PG_WIRED) | p;
1031 			pte++;
1032 		} while (--i != 0);
1033 	} else if (pmap->pm_tlbpid >= 0 && pmap->pm_hash != zero_pmap_hash) {
1034 		i = pmaxpagesperpage;
1035 		do {
1036 			hp = &pmap->pm_hash[PMAP_HASH(va)];
1037 			if (hp->high) {
1038 				if (!(hp->low & PG_WIRED) && p)
1039 					pmap->pm_stats.wired_count++;
1040 				else if ((hp->low & PG_WIRED) && !p)
1041 					pmap->pm_stats.wired_count--;
1042 				hp->low = (hp->low & ~PG_WIRED) | p;
1043 			}
1044 			va += NBPG;
1045 		} while (--i != 0);
1046 	}
1047 }
1048 
1049 /*
1050  *	Routine:	pmap_extract
1051  *	Function:
1052  *		Extract the physical page address associated
1053  *		with the given map/virtual_address pair.
1054  */
1055 vm_offset_t
1056 pmap_extract(pmap, va)
1057 	register pmap_t	pmap;
1058 	vm_offset_t va;
1059 {
1060 	register vm_offset_t pa;
1061 	register pmap_hash_t hp;
1062 
1063 #ifdef DEBUG
1064 	if (pmapdebug & PDB_FOLLOW)
1065 		printf("pmap_extract(%x, %x) -> ", pmap, va);
1066 #endif
1067 
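	/*
	 * For user pmaps only the software TLB-cache hash is consulted,
	 * so a mapping that is valid but not currently cached comes
	 * back as 0.
	 */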
1068 	if (!pmap->pm_hash)
1069 		pa = kvtopte(va)->pt_entry & PG_FRAME;
1070 	else if (pmap->pm_tlbpid >= 0) {
1071 		hp = &pmap->pm_hash[PMAP_HASH(va)];
1072 		if (hp->high)
1073 			pa = hp->low & PG_FRAME;
1074 		else
1075 			pa = 0;
1076 	} else
1077 		pa = 0;
1078 
1079 #ifdef DEBUG
1080 	if (pmapdebug & PDB_FOLLOW)
1081 		printf("%x\n", pa);
1082 #endif
1083 	return(pa);
1084 }
1085 
1086 /*
1087  *	Copy the range specified by src_addr/len
1088  *	from the source map to the range dst_addr/len
1089  *	in the destination map.
1090  *
1091  *	This routine is only advisory and need not do anything.
1092  */
1093 void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
1094 	pmap_t dst_pmap;
1095 	pmap_t src_pmap;
1096 	vm_offset_t dst_addr;
1097 	vm_size_t len;
1098 	vm_offset_t src_addr;
1099 {
1100 
1101 #ifdef DEBUG
1102 	if (pmapdebug & PDB_FOLLOW)
1103 		printf("pmap_copy(%x, %x, %x, %x, %x)\n",
1104 		       dst_pmap, src_pmap, dst_addr, len, src_addr);
1105 #endif
1106 }
1107 
1108 /*
1109  *	Require that all active physical maps contain no
1110  *	incorrect entries NOW.  [This update includes
1111  *	forcing updates of any address map caching.]
1112  *
1113  *	Generally used to ensure that a thread about
1114  *	to run will see a semantically correct world.
1115  */
1116 void pmap_update()
1117 {
1118 
1119 #ifdef DEBUG
1120 	if (pmapdebug & PDB_FOLLOW)
1121 		printf("pmap_update()\n");
1122 #endif
1123 }
1124 
1125 /*
1126  *	Routine:	pmap_collect
1127  *	Function:
1128  *		Garbage collects the physical map system for
1129  *		pages which are no longer used.
1130  *		Success need not be guaranteed -- that is, there
1131  *		may well be pages which are not referenced, but
1132  *		others may be collected.
1133  *	Usage:
1134  *		Called by the pageout daemon when pages are scarce.
1135  */
1136 void
1137 pmap_collect(pmap)
1138 	pmap_t pmap;
1139 {
1140 
1141 #ifdef DEBUG
1142 	if (pmapdebug & PDB_FOLLOW)
1143 		printf("pmap_collect(%x)\n", pmap);
1144 #endif
1145 }
1146 
1147 /*
1148  *	pmap_zero_page zeros the specified (machine independent)
1149  *	page.
1150  */
1151 void
1152 pmap_zero_page(phys)
1153 	register vm_offset_t phys;
1154 {
1155 	register vm_offset_t end;
1156 
1157 #ifdef DEBUG
1158 	if (pmapdebug & PDB_FOLLOW)
1159 		printf("pmap_zero_page(%x)\n", phys);
1160 #endif
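	/*
	 * Clear the page four words per iteration, addressing it
	 * directly rather than through a temporary mapping.
	 */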
1161 	end = phys + PAGE_SIZE;
1162 	do {
1163 		((unsigned *)phys)[0] = 0;
1164 		((unsigned *)phys)[1] = 0;
1165 		((unsigned *)phys)[2] = 0;
1166 		((unsigned *)phys)[3] = 0;
1167 		phys += 4 * sizeof(unsigned);
1168 	} while (phys != end);
1169 }
1170 
1171 /*
1172  *	pmap_copy_page copies the specified (machine independent)
1173  *	page.
1174  */
1175 void
1176 pmap_copy_page(src, dst)
1177 	register vm_offset_t src, dst;
1178 {
1179 	register vm_offset_t end;
1180 	register unsigned tmp0, tmp1, tmp2, tmp3;
1181 
1182 #ifdef DEBUG
1183 	if (pmapdebug & PDB_FOLLOW)
1184 		printf("pmap_copy_page(%x, %x)\n", src, dst);
1185 #endif
1186 	end = src + PAGE_SIZE;
1187 	do {
1188 		tmp0 = ((unsigned *)src)[0];
1189 		tmp1 = ((unsigned *)src)[1];
1190 		tmp2 = ((unsigned *)src)[2];
1191 		tmp3 = ((unsigned *)src)[3];
1192 		((unsigned *)dst)[0] = tmp0;
1193 		((unsigned *)dst)[1] = tmp1;
1194 		((unsigned *)dst)[2] = tmp2;
1195 		((unsigned *)dst)[3] = tmp3;
1196 		src += 4 * sizeof(unsigned);
1197 		dst += 4 * sizeof(unsigned);
1198 	} while (src != end);
1199 }
1200 
1201 /*
1202  *	Routine:	pmap_pageable
1203  *	Function:
1204  *		Make the specified pages (by pmap, offset)
1205  *		pageable (or not) as requested.
1206  *
1207  *		A page which is not pageable may not take
1208  *		a fault; therefore, its page table entry
1209  *		must remain valid for the duration.
1210  *
1211  *		This routine is merely advisory; pmap_enter
1212  *		will specify that these pages are to be wired
1213  *		down (or not) as appropriate.
1214  */
1215 void
1216 pmap_pageable(pmap, sva, eva, pageable)
1217 	pmap_t		pmap;
1218 	vm_offset_t	sva, eva;
1219 	boolean_t	pageable;
1220 {
1221 
1222 #ifdef DEBUG
1223 	if (pmapdebug & PDB_FOLLOW)
1224 		printf("pmap_pageable(%x, %x, %x, %x)\n",
1225 		       pmap, sva, eva, pageable);
1226 #endif
1227 }
1228 
1229 /*
1230  *	Clear the modify bits on the specified physical page.
1231  */
1232 void
1233 pmap_clear_modify(pa)
1234 	vm_offset_t pa;
1235 {
1236 	pmap_hash_t hp;
1237 
1238 #ifdef DEBUG
1239 	if (pmapdebug & PDB_FOLLOW)
1240 		printf("pmap_clear_modify(%x)\n", pa);
1241 #endif
1242 #ifdef ATTR
1243 	pmap_attributes[atop(pa - KERNBASE)] &= ~PMAP_ATTR_MOD;
1244 #endif
1245 }
1246 
1247 /*
1248  *	pmap_clear_reference:
1249  *
1250  *	Clear the reference bit on the specified physical page.
1251  */
1252 void
1253 pmap_clear_reference(pa)
1254 	vm_offset_t pa;
1255 {
1256 
1257 #ifdef DEBUG
1258 	if (pmapdebug & PDB_FOLLOW)
1259 		printf("pmap_clear_reference(%x)\n", pa);
1260 #endif
1261 #ifdef ATTR
1262 	pmap_attributes[atop(pa - KERNBASE)] &= ~PMAP_ATTR_REF;
1263 #endif
1264 }
1265 
1266 /*
1267  *	pmap_is_referenced:
1268  *
1269  *	Return whether or not the specified physical page is referenced
1270  *	by any physical maps.
1271  */
1272 boolean_t
1273 pmap_is_referenced(pa)
1274 	vm_offset_t pa;
1275 {
1276 #ifdef ATTR
1277 	return(pmap_attributes[atop(pa - KERNBASE)] & PMAP_ATTR_REF);
1278 #else
1279 	return(FALSE);
1280 #endif
1281 }
1282 
1283 /*
1284  *	pmap_is_modified:
1285  *
1286  *	Return whether or not the specified physical page is modified
1287  *	by any physical maps.
1288  */
1289 boolean_t
1290 pmap_is_modified(pa)
1291 	vm_offset_t pa;
1292 {
1293 #ifdef ATTR
1294 	return(pmap_attributes[atop(pa - KERNBASE)] & PMAP_ATTR_MOD);
1295 #else
1296 	return(FALSE);
1297 #endif
1298 }
1299 
1300 vm_offset_t
1301 pmap_phys_address(ppn)
1302 	int ppn;
1303 {
1304 
1305 #ifdef DEBUG
1306 	if (pmapdebug & PDB_FOLLOW)
1307 		printf("pmap_phys_address(%x)\n", ppn);
1308 #endif
1309 	panic("pmap_phys_address"); /* XXX */
1310 	return(pmax_ptob(ppn));
1311 }
1312 
1313 /*
1314  * Miscellaneous support routines
1315  */
1316 
1317 /*
1318  * Allocate a hardware PID and return it.
1319  * Also, change the hardwired TLB entry to point to the current TLB cache.
1320  * This is called by swtch().
1321  */
1322 int
1323 pmap_alloc_tlbpid(p)
1324 	register struct proc *p;
1325 {
1326 	register pmap_t pmap;
1327 	register u_int i;
1328 	register int id;
1329 
1330 	pmap = &p->p_vmspace->vm_pmap;
1331 	if ((id = pmap->pm_tlbpid) >= 0) {
1332 		if (pmap->pm_flags & PM_MODIFIED) {
1333 			pmap->pm_flags &= ~PM_MODIFIED;
1334 			MachTLBFlushPID(id);
1335 		}
1336 		goto done;
1337 	}
1338 
1339 	if ((i = whichpids[0]) != 0xFFFFFFFF)
1340 		id = 0;
1341 	else if ((i = whichpids[1]) != 0xFFFFFFFF)
1342 		id = 32;
1343 	else {
1344 		register struct proc *q;
1345 		register pmap_t q_pmap;
1346 
1347 		/*
1348 		 * Have to find a tlbpid to recycle.
1349 		 * There is probably a better way to do this.
1350 		 */
1351 		for (q = allproc; q != NULL; q = q->p_nxt) {
1352 			q_pmap = &q->p_vmspace->vm_pmap;
1353 			if ((id = q_pmap->pm_tlbpid) < 0)
1354 				continue;
1355 			if (q->p_stat != SRUN)
1356 				goto fnd;
1357 		}
1358 		if (id < 0)
1359 			panic("TLBPidAlloc");
1360 	fnd:
1361 		printf("pmap_alloc_tlbpid: recycle pid %d (%s) tlbpid %d\n",
1362 			q->p_pid, q->p_comm, id); /* XXX */
1363 		/*
1364 		 * Even though the virtual to physical mapping hasn't changed,
1365 		 * we need to clear the PID tag in the high entry of the cache.
1366 		 */
1367 		if (q_pmap->pm_hash != zero_pmap_hash) {
1368 			register pmap_hash_t hp;
1369 
1370 			hp = q_pmap->pm_hash;
1371 			for (i = 0; i < PMAP_HASH_NUM_ENTRIES; i++, hp++) {
1372 				if (!hp->high)
1373 					continue;
1374 
1375 				if (hp->low & PG_WIRED) {
1376 					printf("Clearing wired user entry! h %x l %x\n", hp->high, hp->low);
1377 					panic("pmap_alloc_tlbpid: wired");
1378 				}
1379 				pmap_remove_pv(q_pmap, hp->high & PG_FRAME,
1380 					hp->low & PG_FRAME);
1381 				hp->high = 0;
1382 				q_pmap->pm_stats.resident_count--;
1383 			}
1384 		}
1385 		q_pmap->pm_tlbpid = -1;
1386 		MachTLBFlushPID(id);
1387 #ifdef DEBUG
1388 		remove_stats.pidflushes++;
1389 #endif
1390 		pmap->pm_tlbpid = id;
1391 		goto done;
1392 	}
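	/*
	 * Scan for the first clear bit in the word fetched above;
	 * its position becomes the newly allocated PID.
	 */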
1393 	while (i & 1) {
1394 		i >>= 1;
1395 		id++;
1396 	}
1397 	whichpids[id >> 5] |= 1 << (id & 0x1F);
1398 	pmap->pm_tlbpid = id;
1399 done:
1400 	/*
1401 	 * Map in new TLB cache.
1402 	 */
1403 	if (pmap == cur_pmap)
1404 		return (id);
1405 	cur_pmap = pmap;
1406 	for (i = 0; i < PMAP_HASH_UPAGES; i++) {
1407 		MachTLBWriteIndexed(i + UPAGES,
1408 			(PMAP_HASH_UADDR + (i << PGSHIFT)) |
1409 				(id << VMMACH_TLB_PID_SHIFT),
1410 			pmap->pm_hash_ptes[i]);
1411 	}
1412 	return (id);
1413 }
1414 
1415 /*
1416  * Remove a physical to virtual address translation.
1417  */
1418 void
1419 pmap_remove_pv(pmap, va, pa)
1420 	pmap_t pmap;
1421 	vm_offset_t va, pa;
1422 {
1423 	register pv_entry_t pv, npv;
1424 	int s;
1425 
1426 #ifdef DEBUG
1427 	if (pmapdebug & PDB_FOLLOW)
1428 		printf("pmap_remove_pv(%x, %x, %x)\n", pmap, va, pa);
1429 #endif
1430 	/*
1431 	 * Remove page from the PV table (raise IPL since we
1432 	 * may be called at interrupt time).
1433 	 */
1434 	if (!IS_VM_PHYSADDR(pa))
1435 		return;
1436 	pv = pa_to_pvh(pa);
1437 	s = splimp();
1438 	/*
1439 	 * If it is the first entry on the list, it is actually
1440 	 * in the header and we must copy the following entry up
1441 	 * to the header.  Otherwise we must search the list for
1442 	 * the entry.  In either case we free the now unused entry.
1443 	 */
1444 	if (pmap == pv->pv_pmap && va == pv->pv_va) {
1445 		npv = pv->pv_next;
1446 		if (npv) {
1447 			*pv = *npv;
1448 			free((caddr_t)npv, M_VMPVENT);
1449 		} else
1450 			pv->pv_pmap = NULL;
1451 #ifdef DEBUG
1452 		remove_stats.pvfirst++;
1453 #endif
1454 	} else {
1455 		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
1456 #ifdef DEBUG
1457 			remove_stats.pvsearch++;
1458 #endif
1459 			if (pmap == npv->pv_pmap && va == npv->pv_va)
1460 				goto fnd;
1461 		}
1462 #ifdef DIAGNOSTIC
1463 		printf("pmap_remove_pv(%x, %x, %x) not found\n", pmap, va, pa);
1464 		panic("pmap_remove_pv");
1465 #endif
1466 	fnd:
1467 		pv->pv_next = npv->pv_next;
1468 		free((caddr_t)npv, M_VMPVENT);
1469 	}
1470 	splx(s);
1471 }
1472 
1473 #ifdef DEBUG
1474 pmap_print(pmap)
1475 	pmap_t pmap;
1476 {
1477 	register pmap_hash_t hp;
1478 	register int i;
1479 
1480 	printf("\tpmap_print(%x)\n", pmap);
1481 
1482 	if (pmap->pm_hash == zero_pmap_hash) {
1483 		printf("pm_hash == zero\n");
1484 		return;
1485 	}
1486 	if (pmap->pm_hash == (pmap_hash_t)0) {
1487 		printf("pm_hash == kernel\n");
1488 		return;
1489 	}
1490 	hp = pmap->pm_hash;
1491 	for (i = 0; i < PMAP_HASH_NUM_ENTRIES; i++, hp++) {
1492 		if (!hp->high)
1493 			continue;
1494 		printf("%d: hi %x low %x\n", i, hp->high, hp->low);
1495 	}
1496 }
1497 #endif
1498