1 /*
2  * Copyright (c) 1992 The Regents of the University of California.
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * the Systems Programming Group of the University of Utah Computer
7  * Science Department and Ralph Campbell.
8  *
9  * %sccs.include.redist.c%
10  *
11  *	@(#)pmap.c	7.13 (Berkeley) 04/05/93
12  */
13 
14 /*
15  *	Manages physical address maps.
16  *
17  *	In addition to hardware address maps, this
18  *	module is called upon to provide software-use-only
19  *	maps which may or may not be stored in the same
20  *	form as hardware maps.  These pseudo-maps are
21  *	used to store intermediate results from copy
22  *	operations to and from address spaces.
23  *
24  *	Since the information managed by this module is
25  *	also stored by the logical address mapping module,
26  *	this module may throw away valid virtual-to-physical
27  *	mappings at almost any time.  However, invalidations
28  *	of virtual-to-physical mappings must be done as
29  *	requested.
30  *
31  *	In order to cope with hardware architectures which
32  *	make virtual-to-physical map invalidations expensive,
33  *	this module may delay invalidation or protection-reduction
34  *	operations until such time as they are actually
35  *	necessary.  This module is given full information as
36  *	to which processors are currently using which maps,
37  *	and as to when physical maps must be made correct.
38  */
39 
40 #include <sys/param.h>
41 #include <sys/proc.h>
42 #include <sys/malloc.h>
43 #include <sys/user.h>
44 
45 #include <vm/vm.h>
46 #include <vm/vm_kern.h>
47 #include <vm/vm_page.h>
48 
49 #include <machine/machConst.h>
50 #include <machine/pte.h>
51 
52 /*
53  * For each vm_page_t, there is a list of all currently valid virtual
54  * mappings of that page.  An entry is a pv_entry_t; the list is pv_table.
55  * XXX really should do this as a part of the higher level code.
56  */
57 typedef struct pv_entry {
58 	struct pv_entry	*pv_next;	/* next pv_entry */
59 	struct pmap	*pv_pmap;	/* pmap where mapping lies */
60 	vm_offset_t	pv_va;		/* virtual address for mapping */
61 } *pv_entry_t;
62 
63 pv_entry_t	pv_table;	/* array of entries, one per page */
64 extern void	pmap_remove_pv();
65 
66 #define pa_index(pa)		atop((pa) - first_phys_addr)
67 #define pa_to_pvh(pa)		(&pv_table[pa_index(pa)])
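/*
 * Note: pa_index() assumes "pa" is a managed physical address, i.e. one at
 * or above first_phys_addr; callers check IS_VM_PHYSADDR() before using it.
 */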
68 
69 #ifdef DEBUG
70 struct {
71 	int kernel;	/* entering kernel mapping */
72 	int user;	/* entering user mapping */
73 	int ptpneeded;	/* needed to allocate a PT page */
74 	int pwchange;	/* no mapping change, just wiring or protection */
75 	int wchange;	/* no mapping change, just wiring */
76 	int mchange;	/* was mapped but mapping to different page */
77 	int managed;	/* a managed page */
78 	int firstpv;	/* first mapping for this PA */
79 	int secondpv;	/* second mapping for this PA */
80 	int ci;		/* cache inhibited */
81 	int unmanaged;	/* not a managed page */
82 	int flushes;	/* cache flushes */
83 	int cachehit;	/* new entry forced valid entry out */
84 } enter_stats;
85 struct {
86 	int calls;
87 	int removes;
88 	int flushes;
89 	int pidflushes;	/* HW pid stolen */
90 	int pvfirst;
91 	int pvsearch;
92 } remove_stats;
93 
94 int pmapdebug;
95 #define PDB_FOLLOW	0x0001
96 #define PDB_INIT	0x0002
97 #define PDB_ENTER	0x0004
98 #define PDB_REMOVE	0x0008
99 #define PDB_CREATE	0x0010
100 #define PDB_PTPAGE	0x0020
101 #define PDB_CACHE	0x0040
102 #define PDB_BITS	0x0080
103 #define PDB_COLLECT	0x0100
104 #define PDB_PROTECT	0x0200
105 #define PDB_TLBPID	0x0400
106 #define PDB_PARANOIA	0x2000
107 #define PDB_WIRING	0x4000
108 #define PDB_PVDUMP	0x8000
109 
110 #endif /* DEBUG */
111 
112 u_int	whichpids[2] = {	/* bit mask of hardware PID's in use */
113 	3, 0
114 };
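/*
 * The initial value 3 marks TLB PIDs 0 and 1 as in use: PID 1 is
 * preallocated for proc0's address space in mach_init() (see pmap_pinit
 * below) and PID 0 appears to be reserved.
 */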
115 
116 struct pmap	kernel_pmap_store;
117 pmap_t		cur_pmap;	/* current pmap mapped in hardware */
118 
119 vm_offset_t    	avail_start;	/* PA of first available physical page */
120 vm_offset_t	avail_end;	/* PA of last available physical page */
121 vm_size_t	mem_size;	/* memory size in bytes */
122 vm_offset_t	virtual_avail;  /* VA of first avail page (after kernel bss)*/
123 vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
124 int		pmaxpagesperpage;	/* PAGE_SIZE / NBPG */
125 #ifdef ATTR
126 char		*pmap_attributes;	/* reference and modify bits */
127 #endif
128 pmap_hash_t	zero_pmap_hash;		/* empty TLB hash table for init */
129 
130 /*
131  *	Bootstrap the system enough to run with virtual memory.
132  */
133 void
134 pmap_bootstrap(firstaddr)
135 	vm_offset_t firstaddr;
136 {
137 	register int i;
138 	vm_offset_t start = firstaddr;
139 	extern int maxmem, physmem;
140 
141 	/*
142 	 * Allocate a TLB hash table for the kernel.
143 	 * This could be a KSEG0 address and thus save TLB entries but
144 	 * it is faster and simpler in assembly language to have a
145 	 * fixed address that can be accessed with a 16 bit signed offset.
146 	 * Note: the kernel pm_hash field is null; user pm_hash fields point
147 	 * either to a private hash table or to zero_pmap_hash.
148 	 */
149 	kernel_pmap_store.pm_hash = (pmap_hash_t)0;
150 	for (i = 0; i < PMAP_HASH_KPAGES; i++) {
151 		MachTLBWriteIndexed(i + UPAGES + PMAP_HASH_UPAGES,
152 			PMAP_HASH_KADDR + (i << PGSHIFT),
153 			firstaddr | PG_V | PG_M | PG_G);
154 		firstaddr += NBPG;
155 	}
156 
157 	/*
158 	 * Allocate an empty TLB hash table for initial pmap's.
159 	 */
160 	zero_pmap_hash = (pmap_hash_t)MACH_PHYS_TO_CACHED(firstaddr);
161 
162 	/* init proc[0]'s pmap hash table */
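	/* The wired entries below are tagged with TLB PID 1, proc0's preallocated PID. */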
163 	for (i = 0; i < PMAP_HASH_UPAGES; i++) {
164 		kernel_pmap_store.pm_hash_ptes[i] = firstaddr | PG_V | PG_RO;
165 		MachTLBWriteIndexed(i + UPAGES,
166 			(PMAP_HASH_UADDR + (i << PGSHIFT)) |
167 				(1 << VMMACH_TLB_PID_SHIFT),
168 			kernel_pmap_store.pm_hash_ptes[i]);
169 		firstaddr += NBPG;
170 	}
171 
172 	/*
173 	 * Allocate memory for pv_table.
174 	 * This will allocate more entries than we really need.
175 	 * We should do this in pmap_init when we know the actual
176 	 * phys_start and phys_end but it is better to use phys addresses
177 	 * rather than kernel virtual addresses mapped through the TLB.
178 	 */
179 	i = (maxmem - pmax_btop(firstaddr)) * sizeof(struct pv_entry);
180 	i = pmax_round_page(i);
181 	pv_table = (pv_entry_t)MACH_PHYS_TO_CACHED(firstaddr);
182 	firstaddr += i;
183 
184 	/*
185 	 * Clear allocated memory.
186 	 */
187 	bzero((caddr_t)MACH_PHYS_TO_CACHED(start), firstaddr - start);
188 
189 	avail_start = firstaddr;
190 	avail_end = pmax_ptob(maxmem);
191 	mem_size = avail_end - avail_start;
192 
193 	virtual_avail = VM_MIN_KERNEL_ADDRESS;
194 	virtual_end = VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES * NPTEPG * NBPG;
195 	/* XXX need to decide how to set cnt.v_page_size */
196 	pmaxpagesperpage = 1;
197 
198 	cur_pmap = &kernel_pmap_store;
199 	simple_lock_init(&kernel_pmap_store.pm_lock);
200 	kernel_pmap_store.pm_count = 1;
201 }
202 
203 /*
204  * Bootstrap memory allocator. This function allows for early dynamic
205  * memory allocation until the virtual memory system has been bootstrapped.
206  * After that point, either kmem_alloc or malloc should be used. This
207  * function works by stealing pages from the (to be) managed page pool,
208  * stealing virtual address space, then mapping the pages and zeroing them.
209  *
210  * It should be used from pmap_bootstrap until vm_page_startup; afterwards
211  * it cannot be used and will generate a panic if tried.  Note that this
212  * memory will never be freed, and in essence it is wired down.
213  */
214 void *
215 pmap_bootstrap_alloc(size)
216 	int size;
217 {
218 	vm_offset_t val;
219 	extern boolean_t vm_page_startup_initialized;
220 
221 	if (vm_page_startup_initialized)
222 		panic("pmap_bootstrap_alloc: called after startup initialized");
223 
224 	val = MACH_PHYS_TO_CACHED(avail_start);
225 	size = round_page(size);
226 	avail_start += size;
227 
228 	blkclr((caddr_t)val, size);
229 	return ((void *)val);
230 }
231 
232 /*
233  *	Initialize the pmap module.
234  *	Called by vm_init, to initialize any structures that the pmap
235  *	system needs to map virtual memory.
236  */
237 void
238 pmap_init(phys_start, phys_end)
239 	vm_offset_t phys_start, phys_end;
240 {
241 
242 #ifdef DEBUG
243 	if (pmapdebug & PDB_FOLLOW)
244 		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
245 #endif
246 }
247 
248 /*
249  *	Used to map a range of physical addresses into kernel
250  *	virtual address space.
251  *
252  *	This routine should only be called by vm_page_startup()
253  *	with KSEG0 addresses.
254  */
255 vm_offset_t
256 pmap_map(virt, start, end, prot)
257 	vm_offset_t virt;
258 	vm_offset_t start;
259 	vm_offset_t end;
260 	int prot;
261 {
262 
263 #ifdef DEBUG
264 	if (pmapdebug & PDB_FOLLOW)
265 		printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
266 #endif
267 
268 	return (round_page(end));
269 }
270 
271 /*
272  *	Create and return a physical map.
273  *
274  *	If the size specified for the map
275  *	is zero, the map is an actual physical
276  *	map, and may be referenced by the
277  *	hardware.
278  *
279  *	If the size specified is non-zero,
280  *	the map will be used in software only, and
281  *	is bounded by that size.
282  */
283 pmap_t
284 pmap_create(size)
285 	vm_size_t size;
286 {
287 	register pmap_t pmap;
288 
289 #ifdef DEBUG
290 	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
291 		printf("pmap_create(%x)\n", size);
292 #endif
293 	/*
294 	 * A software-use-only map does not need a pmap
295 	 */
296 	if (size)
297 		return (NULL);
298 
299 	printf("pmap_create(%x) XXX\n", size); /* XXX */
300 	/* XXX: is it ok to wait here? */
301 	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
302 #ifdef notifwewait
303 	if (pmap == NULL)
304 		panic("pmap_create: cannot allocate a pmap");
305 #endif
306 	bzero(pmap, sizeof(*pmap));
307 	pmap_pinit(pmap);
308 	return (pmap);
309 }
310 
311 /*
312  * Initialize a preallocated and zeroed pmap structure,
313  * such as one in a vmspace structure.
314  */
315 void
316 pmap_pinit(pmap)
317 	register struct pmap *pmap;
318 {
319 	register int i;
320 	extern struct vmspace vmspace0;
321 
322 #ifdef DEBUG
323 	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
324 		printf("pmap_pinit(%x)\n", pmap);
325 #endif
326 	simple_lock_init(&pmap->pm_lock);
327 	pmap->pm_count = 1;
328 	pmap->pm_flags = 0;
329 	pmap->pm_hash = zero_pmap_hash;
330 	for (i = 0; i < PMAP_HASH_UPAGES; i++)
331 		pmap->pm_hash_ptes[i] =
332 			(MACH_CACHED_TO_PHYS(zero_pmap_hash) + (i << PGSHIFT)) |
333 				PG_V | PG_RO;
334 	if (pmap == &vmspace0.vm_pmap)
335 		pmap->pm_tlbpid = 1;	/* preallocated in mach_init() */
336 	else
337 		pmap->pm_tlbpid = -1;	/* none allocated yet */
338 }
339 
340 /*
341  *	Retire the given physical map from service.
342  *	Should only be called if the map contains
343  *	no valid mappings.
344  */
345 void
346 pmap_destroy(pmap)
347 	register pmap_t pmap;
348 {
349 	int count;
350 
351 #ifdef DEBUG
352 	if (pmapdebug & PDB_FOLLOW)
353 		printf("pmap_destroy(%x)\n", pmap);
354 #endif
355 	if (pmap == NULL)
356 		return;
357 
358 	printf("pmap_destroy(%x) XXX\n", pmap); /* XXX */
359 	simple_lock(&pmap->pm_lock);
360 	count = --pmap->pm_count;
361 	simple_unlock(&pmap->pm_lock);
362 	if (count == 0) {
363 		pmap_release(pmap);
364 		free((caddr_t)pmap, M_VMPMAP);
365 	}
366 }
367 
368 /*
369  * Release any resources held by the given physical map.
370  * Called when a pmap initialized by pmap_pinit is being released.
371  * Should only be called if the map contains no valid mappings.
372  */
373 void
374 pmap_release(pmap)
375 	register pmap_t pmap;
376 {
377 	register int id;
378 #ifdef DIAGNOSTIC
379 	register int i;
380 #endif
381 
382 #ifdef DEBUG
383 	if (pmapdebug & PDB_FOLLOW)
384 		printf("pmap_release(%x)\n", pmap);
385 #endif
386 
387 	if (pmap->pm_hash && pmap->pm_hash != zero_pmap_hash) {
388 		kmem_free(kernel_map, (vm_offset_t)pmap->pm_hash,
389 			PMAP_HASH_SIZE);
390 		pmap->pm_hash = zero_pmap_hash;
391 	}
392 	if ((id = pmap->pm_tlbpid) < 0)
393 		return;
394 #ifdef DIAGNOSTIC
395 	if (!(whichpids[id >> 5] & (1 << (id & 0x1F))))
396 		panic("pmap_release: id free");
397 #endif
398 	MachTLBFlushPID(id);
399 	whichpids[id >> 5] &= ~(1 << (id & 0x1F));
400 	pmap->pm_flags &= ~PM_MODIFIED;
401 	pmap->pm_tlbpid = -1;
402 	if (pmap == cur_pmap)
403 		cur_pmap = (pmap_t)0;
404 #ifdef DIAGNOSTIC
405 	/* invalidate user PTE cache */
406 	for (i = 0; i < PMAP_HASH_UPAGES; i++)
407 		MachTLBWriteIndexed(i + UPAGES, MACH_RESERVED_ADDR, 0);
408 #endif
409 }
410 
411 /*
412  *	Add a reference to the specified pmap.
413  */
414 void
415 pmap_reference(pmap)
416 	pmap_t pmap;
417 {
418 
419 #ifdef DEBUG
420 	if (pmapdebug & PDB_FOLLOW)
421 		printf("pmap_reference(%x)\n", pmap);
422 #endif
423 	if (pmap != NULL) {
424 		simple_lock(&pmap->pm_lock);
425 		pmap->pm_count++;
426 		simple_unlock(&pmap->pm_lock);
427 	}
428 }
429 
430 /*
431  *	Remove the given range of addresses from the specified map.
432  *
433  *	It is assumed that the start and end are properly
434  *	rounded to the page size.
435  */
436 void
437 pmap_remove(pmap, sva, eva)
438 	register pmap_t pmap;
439 	vm_offset_t sva, eva;
440 {
441 	register vm_offset_t va;
442 	register pv_entry_t pv, npv;
443 	register int i;
444 	pmap_hash_t hp;
445 	unsigned entry;
446 
447 #ifdef DEBUG
448 	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
449 		printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
450 	remove_stats.calls++;
451 #endif
452 	if (pmap == NULL)
453 		return;
454 
455 	/* anything in the cache? */
456 	if (pmap->pm_tlbpid < 0 || pmap->pm_hash == zero_pmap_hash)
457 		return;
458 
459 	if (!pmap->pm_hash) {
460 		register pt_entry_t *pte;
461 
462 		/* remove entries from kernel pmap */
463 #ifdef DIAGNOSTIC
464 		if (sva < VM_MIN_KERNEL_ADDRESS ||
465 		    eva > VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
466 			panic("pmap_remove");
467 #endif
468 		pte = kvtopte(sva);
469 		for (va = sva; va < eva; va += NBPG, pte++) {
470 			entry = pte->pt_entry;
471 			if (!(entry & PG_V))
472 				continue;
473 			if (entry & PG_WIRED)
474 				pmap->pm_stats.wired_count--;
475 			pmap->pm_stats.resident_count--;
476 			pmap_remove_pv(pmap, va, entry & PG_FRAME);
477 #ifdef ATTR
478 			pmap_attributes[atop(entry - KERNBASE)] = 0;
479 #endif
480 			pte->pt_entry = PG_NV;
481 			/*
482 			 * Flush the TLB for the given address.
483 			 */
484 			MachTLBFlushAddr(va);
485 #ifdef DEBUG
486 			remove_stats.flushes++;
487 #endif
488 		}
489 		return;
490 	}
491 
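	/*
	 * Tag the addresses with this pmap's TLB PID, since the "high"
	 * word of each hash entry stores the PID along with the virtual
	 * page number.
	 */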
492 	va = sva | (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
493 	eva |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
494 	/*
495 	 * If we are not in the current address space, just flush the
496 	 * software cache and not the hardware.
497 	 */
498 	if (pmap != cur_pmap) {
499 		for (; va < eva; va += NBPG) {
500 			hp = &pmap->pm_hash[PMAP_HASH(va)];
501 			if (hp->pmh_pte[0].high == va)
502 				i = 0;
503 			else if (hp->pmh_pte[1].high == va)
504 				i = 1;
505 			else
506 				continue;
507 
508 			hp->pmh_pte[i].high = 0;
509 			entry = hp->pmh_pte[i].low;
510 			if (entry & PG_WIRED)
511 				pmap->pm_stats.wired_count--;
512 			pmap->pm_stats.resident_count--;
513 			pmap_remove_pv(pmap, va & PG_FRAME, entry & PG_FRAME);
514 #ifdef ATTR
515 			pmap_attributes[atop(entry - KERNBASE)] = 0;
516 #endif
517 			pmap->pm_flags |= PM_MODIFIED;
518 #ifdef DEBUG
519 			remove_stats.removes++;
520 #endif
521 		}
522 		return;
523 	}
524 
525 	for (; va < eva; va += NBPG) {
526 		hp = &pmap->pm_hash[PMAP_HASH(va)];
527 		if (hp->pmh_pte[0].high == va)
528 			i = 0;
529 		else if (hp->pmh_pte[1].high == va)
530 			i = 1;
531 		else
532 			continue;
533 
534 		hp->pmh_pte[i].high = 0;
535 		entry = hp->pmh_pte[i].low;
536 		if (entry & PG_WIRED)
537 			pmap->pm_stats.wired_count--;
538 		pmap->pm_stats.resident_count--;
539 		pmap_remove_pv(pmap, va & PG_FRAME, entry & PG_FRAME);
540 #ifdef ATTR
541 		pmap_attributes[atop(entry - KERNBASE)] = 0;
542 #endif
543 		/*
544 		 * Flush the TLB for the given address.
545 		 */
546 		MachTLBFlushAddr(va);
547 #ifdef DEBUG
548 		remove_stats.flushes++;
549 #endif
550 	}
551 }
552 
553 /*
554  *	pmap_page_protect:
555  *
556  *	Lower the permission for all mappings to a given page.
557  */
558 void
559 pmap_page_protect(pa, prot)
560 	vm_offset_t pa;
561 	vm_prot_t prot;
562 {
563 	register pv_entry_t pv;
564 	register vm_offset_t va;
565 	int s;
566 
567 #ifdef DEBUG
568 	if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
569 	    (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
570 		printf("pmap_page_protect(%x, %x)\n", pa, prot);
571 #endif
572 	if (!IS_VM_PHYSADDR(pa))
573 		return;
574 
575 	switch (prot) {
576 	case VM_PROT_ALL:
577 		break;
578 
579 	/* copy_on_write */
580 	case VM_PROT_READ:
581 	case VM_PROT_READ|VM_PROT_EXECUTE:
582 		pv = pa_to_pvh(pa);
583 		s = splimp();
584 		/*
585 		 * Loop over all current mappings setting/clearing as appropriate.
586 		 */
587 		if (pv->pv_pmap != NULL) {
588 			for (; pv; pv = pv->pv_next) {
589 				extern vm_offset_t pager_sva, pager_eva;
590 				va = pv->pv_va;
591 
592 				/*
593 				 * XXX don't write protect pager mappings
594 				 */
595 				if (va >= pager_sva && va < pager_eva)
596 					continue;
597 				pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
598 					prot);
599 			}
600 		}
601 		splx(s);
602 		break;
603 
604 	/* remove_all */
605 	default:
606 		pv = pa_to_pvh(pa);
607 		s = splimp();
608 		while (pv->pv_pmap != NULL) {
609 			pmap_remove(pv->pv_pmap, pv->pv_va,
610 				    pv->pv_va + PAGE_SIZE);
611 		}
612 		splx(s);
613 	}
614 }
615 
616 /*
617  *	Set the physical protection on the
618  *	specified range of this map as requested.
619  */
620 void
621 pmap_protect(pmap, sva, eva, prot)
622 	register pmap_t pmap;
623 	vm_offset_t sva, eva;
624 	vm_prot_t prot;
625 {
626 	register vm_offset_t va;
627 	register int i;
628 	pmap_hash_t hp;
629 	u_int p;
630 
631 #ifdef DEBUG
632 	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
633 		printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
634 #endif
635 	if (pmap == NULL)
636 		return;
637 
638 	/* anything in the software cache? */
639 	if (pmap->pm_tlbpid < 0 || pmap->pm_hash == zero_pmap_hash)
640 		return;
641 
642 	if (!(prot & VM_PROT_READ)) {
643 		pmap_remove(pmap, sva, eva);
644 		return;
645 	}
646 
647 	if (!pmap->pm_hash) {
648 		register pt_entry_t *pte;
649 
650 		/*
651 		 * Change entries in kernel pmap.
652 		 * This will trap if the page is writeable (in order to set
653 		 * the dirty bit) even if the dirty bit is already set. The
654 		 * optimization isn't worth the effort since this code isn't
655 		 * executed much. The common case is to make a user page
656 		 * read-only.
657 		 */
658 #ifdef DIAGNOSTIC
659 		if (sva < VM_MIN_KERNEL_ADDRESS ||
660 		    eva > VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
661 			panic("pmap_protect");
662 #endif
663 		p = (prot & VM_PROT_WRITE) ? PG_RW : PG_RO;
664 		pte = kvtopte(sva);
665 		for (va = sva; va < eva; va += NBPG, pte++) {
666 			if (!(pte->pt_entry & PG_V))
667 				continue;
668 			pte->pt_entry = (pte->pt_entry & ~(PG_M | PG_RO)) | p;
669 			/*
670 			 * Update the TLB if the given address is in the cache.
671 			 */
672 			MachTLBUpdate(va, pte->pt_entry);
673 		}
674 		return;
675 	}
676 
677 	p = (prot & VM_PROT_WRITE) ? PG_RW : PG_RO;
678 	va = sva | (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
679 	eva |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
680 	/*
681 	 * If we are not in the current address space, just update the
682 	 * software cache and not the hardware TLB.
683 	 */
684 	if (pmap != cur_pmap) {
685 		for (; va < eva; va += NBPG) {
686 			hp = &pmap->pm_hash[PMAP_HASH(va)];
687 			if (hp->pmh_pte[0].high == va)
688 				i = 0;
689 			else if (hp->pmh_pte[1].high == va)
690 				i = 1;
691 			else
692 				continue;
693 
694 			hp->pmh_pte[i].low = (hp->pmh_pte[i].low & ~(PG_M | PG_RO)) | p;
695 			pmap->pm_flags |= PM_MODIFIED;
696 		}
697 		return;
698 	}
699 
700 	for (; va < eva; va += NBPG) {
701 		hp = &pmap->pm_hash[PMAP_HASH(va)];
702 		if (hp->pmh_pte[0].high == va)
703 			i = 0;
704 		else if (hp->pmh_pte[1].high == va)
705 			i = 1;
706 		else
707 			continue;
708 
709 		hp->pmh_pte[i].low = (hp->pmh_pte[i].low & ~(PG_M | PG_RO)) | p;
710 		/*
711 		 * Update the TLB if the given address is in the cache.
712 		 */
713 		MachTLBUpdate(hp->pmh_pte[i].high, hp->pmh_pte[i].low);
714 	}
715 }
716 
717 /*
718  *	Insert the given physical page (p) at
719  *	the specified virtual address (v) in the
720  *	target physical map with the protection requested.
721  *
722  *	If specified, the page will be wired down, meaning
723  *	that the related pte can not be reclaimed.
724  *
725  *	NB:  This is the only routine which MAY NOT lazy-evaluate
726  *	or lose information.  That is, this routine must actually
727  *	insert this page into the given map NOW.
728  */
729 void
730 pmap_enter(pmap, va, pa, prot, wired)
731 	register pmap_t pmap;
732 	vm_offset_t va;
733 	register vm_offset_t pa;
734 	vm_prot_t prot;
735 	boolean_t wired;
736 {
737 	register pmap_hash_t hp;
738 	register u_int npte;
739 	register int i, j;
740 	int newpos;
741 
742 #ifdef DEBUG
743 	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
744 		printf("pmap_enter(%x, %x, %x, %x, %x)\n",
745 		       pmap, va, pa, prot, wired);
746 #endif
747 #ifdef DIAGNOSTIC
748 	if (!pmap)
749 		panic("pmap_enter: pmap");
750 	if (pmap->pm_tlbpid < 0)
751 		panic("pmap_enter: tlbpid");
752 	if (!pmap->pm_hash) {
753 		enter_stats.kernel++;
754 		if (va < VM_MIN_KERNEL_ADDRESS ||
755 		    va >= VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
756 			panic("pmap_enter: kva");
757 	} else {
758 		enter_stats.user++;
759 		if (va & 0x80000000)
760 			panic("pmap_enter: uva");
761 	}
762 	if (pa & 0x80000000)
763 		panic("pmap_enter: pa");
764 	if (!(prot & VM_PROT_READ))
765 		panic("pmap_enter: prot");
766 #endif
767 
768 	/*
769 	 * See if we need to create a new TLB cache.
770 	 */
771 	if (pmap->pm_hash == zero_pmap_hash) {
772 		register vm_offset_t kva;
773 		register pt_entry_t *pte;
774 
775 		kva = kmem_alloc(kernel_map, PMAP_HASH_SIZE);
776 		pmap->pm_hash = (pmap_hash_t)kva;
777 
778 		/*
779 		 * Convert the kernel virtual address to a physical one
780 		 * and cache it in the pmap. Note: if the physical address
781 		 * can change (due to memory compaction in kmem_alloc?),
782 		 * we will have to update things.
783 		 */
784 		pte = kvtopte(kva);
785 		for (i = 0; i < PMAP_HASH_UPAGES; i++) {
786 			pmap->pm_hash_ptes[i] = pte->pt_entry & ~PG_G;
787 			pte++;
788 		}
789 
790 		/*
791 		 * Map in new TLB cache if it is current.
792 		 */
793 		if (pmap == cur_pmap) {
794 			for (i = 0; i < PMAP_HASH_UPAGES; i++) {
795 				MachTLBWriteIndexed(i + UPAGES,
796 					(PMAP_HASH_UADDR + (i << PGSHIFT)) |
797 						(pmap->pm_tlbpid  <<
798 						VMMACH_TLB_PID_SHIFT),
799 					pmap->pm_hash_ptes[i]);
800 			}
801 		}
802 #ifdef DIAGNOSTIC
803 		for (i = 0; i < PAGE_SIZE; i += sizeof(int), kva += sizeof(int))
804 			if (*(int *)kva != 0)
805 				panic("pmap_enter: *kva != 0");
806 #endif
807 	}
808 
809 	if (IS_VM_PHYSADDR(pa)) {
810 		register pv_entry_t pv, npv;
811 		int s;
812 
813 		if (!(prot & VM_PROT_WRITE))
814 			npte = PG_RO;
815 		else {
816 			register vm_page_t mem;
817 
818 			mem = PHYS_TO_VM_PAGE(pa);
819 			if ((int)va < 0) {
820 				/*
821 				 * Don't bother to trap on kernel writes,
822 				 * just record page as dirty.
823 				 */
824 				npte = PG_M;
825 				mem->flags &= ~PG_CLEAN;
826 			} else
827 #ifdef ATTR
828 				if ((pmap_attributes[atop(pa - KERNBASE)] &
829 				    PMAP_ATTR_MOD) || !(mem->flags & PG_CLEAN))
830 #else
831 				if (!(mem->flags & PG_CLEAN))
832 #endif
833 					npte = PG_M;
834 				else
835 					npte = 0;
836 		}
837 
838 #ifdef DEBUG
839 		enter_stats.managed++;
840 #endif
841 		/*
842 		 * Enter the pmap and virtual address into the
843 		 * physical to virtual map table.
844 		 */
845 		pv = pa_to_pvh(pa);
846 		s = splimp();
847 #ifdef DEBUG
848 		if (pmapdebug & PDB_ENTER)
849 			printf("pmap_enter: pv %x: was %x/%x/%x\n",
850 			       pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
851 #endif
852 		if (pv->pv_pmap == NULL) {
853 			/*
854 			 * No entries yet, use header as the first entry
855 			 */
856 #ifdef DEBUG
857 			enter_stats.firstpv++;
858 #endif
859 			pv->pv_va = va;
860 			pv->pv_pmap = pmap;
861 			pv->pv_next = NULL;
862 		} else {
863 			/*
864 			 * There is at least one other VA mapping this page.
865 			 * Place this entry after the header.
866 			 *
867 			 * Note: the entry may already be in the table if
868 			 * we are only changing the protection bits.
869 			 */
870 			for (npv = pv; npv; npv = npv->pv_next)
871 				if (pmap == npv->pv_pmap && va == npv->pv_va) {
872 #ifdef DIAGNOSTIC
873 				    if (!pmap->pm_hash) {
874 					unsigned entry;
875 
876 					entry = kvtopte(va)->pt_entry;
877 					if (!(entry & PG_V) ||
878 					    (entry & PG_FRAME) != pa)
879 			printf("found kva %x pa %x in pv_table but != %x\n",
880 				va, pa, entry);
881 				    } else {
882 					hp = &pmap->pm_hash[PMAP_HASH(va)];
883 					if ((hp->pmh_pte[0].high == (va |
884 					(pmap->pm_tlbpid <<
885 					VMMACH_TLB_PID_SHIFT)) &&
886 					(hp->pmh_pte[0].low & PG_FRAME) == pa) ||
887 					(hp->pmh_pte[1].high == (va |
888 					(pmap->pm_tlbpid <<
889 					VMMACH_TLB_PID_SHIFT)) &&
890 					(hp->pmh_pte[1].low & PG_FRAME) == pa))
891 						goto fnd;
892 			printf("found va %x pa %x in pv_table but !=\n",
893 				va, pa);
894 				    }
895 #endif
896 					goto fnd;
897 				}
898 			/* can this cause us to recurse forever? */
899 			npv = (pv_entry_t)
900 				malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
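			/*
			 * XXX: malloc() with M_NOWAIT can fail; a null return
			 * is not checked before npv is used below.
			 */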
901 			npv->pv_va = va;
902 			npv->pv_pmap = pmap;
903 			npv->pv_next = pv->pv_next;
904 			pv->pv_next = npv;
905 #ifdef DEBUG
906 			if (!npv->pv_next)
907 				enter_stats.secondpv++;
908 #endif
909 		fnd:
910 			;
911 		}
912 		splx(s);
913 	} else {
914 		/*
915 		 * Assumption: if it is not part of our managed memory
916 		 * then it must be device memory which may be volatile.
917 		 */
918 #ifdef DEBUG
919 		enter_stats.unmanaged++;
920 #endif
921 		npte = (prot & VM_PROT_WRITE) ? (PG_M | PG_N) : (PG_RO | PG_N);
922 	}
923 
924 	/*
925 	 * The only time we need to flush the cache is if we
926 	 * execute from a physical address and then change the data.
927 	 * This is the best place to do this.
928 	 * pmap_protect() and pmap_remove() are mostly used to switch
929 	 * between R/W and R/O pages.
930 	 * NOTE: we only support cache flush for read only text.
931 	 */
932 	if (prot == (VM_PROT_READ | VM_PROT_EXECUTE))
933 		MachFlushICache(MACH_PHYS_TO_CACHED(pa), PAGE_SIZE);
934 
935 	if (!pmap->pm_hash) {
936 		register pt_entry_t *pte;
937 
938 		/* enter entries into kernel pmap */
939 		pte = kvtopte(va);
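		/*
		 * PG_G marks the mapping global, i.e. matched regardless of
		 * the current TLB PID, as is appropriate for kernel mappings.
		 */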
940 		npte |= pa | PG_V | PG_G;
941 		if (wired) {
942 			pmap->pm_stats.wired_count += pmaxpagesperpage;
943 			npte |= PG_WIRED;
944 		}
945 		i = pmaxpagesperpage;
946 		do {
947 			if (!(pte->pt_entry & PG_V)) {
948 				pmap->pm_stats.resident_count++;
949 				MachTLBWriteRandom(va, npte);
950 			} else {
951 #ifdef DIAGNOSTIC
952 				if (pte->pt_entry & PG_WIRED)
953 					panic("pmap_enter: kernel wired");
954 #endif
955 				/*
956 				 * Update the same virtual address entry.
957 				 */
958 				MachTLBUpdate(va, npte);
959 				printf("TLB update kva %x pte %x -> %x\n",
960 					va, pte->pt_entry, npte); /* XXX */
961 			}
962 			pte->pt_entry = npte;
963 			va += NBPG;
964 			npte += NBPG;
965 			pte++;
966 		} while (--i != 0);
967 		return;
968 	}
969 
970 	/*
971 	 * Now validate mapping with desired protection/wiring.
972 	 * Assume uniform modified and referenced status for all
973 	 * PMAX pages in a MACH page.
974 	 */
975 	npte |= pa | PG_V;
976 	if (wired) {
977 		pmap->pm_stats.wired_count += pmaxpagesperpage;
978 		npte |= PG_WIRED;
979 	}
980 #ifdef DEBUG
981 	if (pmapdebug & PDB_ENTER)
982 		printf("pmap_enter: new pte value %x\n", npte);
983 #endif
984 	va |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
985 	i = pmaxpagesperpage;
986 	do {
987 		hp = &pmap->pm_hash[PMAP_HASH(va)];
988 		if (hp->pmh_pte[0].high == va &&
989 		    (hp->pmh_pte[0].low & PG_FRAME) == (npte & PG_FRAME))
990 			j = 0;
991 		else if (hp->pmh_pte[1].high == va &&
992 		    (hp->pmh_pte[1].low & PG_FRAME) == (npte & PG_FRAME))
993 			j = 1;
994 		else
995 			j = -1;
996 		if (j >= 0) {
997 #ifdef DEBUG
998 			enter_stats.cachehit++;
999 #endif
1000 			if (!(hp->pmh_pte[j].low & PG_WIRED)) {
1001 				/*
1002 				 * Update the same entry.
1003 				 */
1004 				hp->pmh_pte[j].low = npte;
1005 				MachTLBUpdate(va, npte);
1006 			} else {
1007 				/*
1008 				 * Don't replace wired entries, just update
1009 				 * the hardware TLB.
1010 				 * Bug: routines to flush the TLB won't know
1011 				 * that the entry is in the hardware.
1012 				 */
1013 				printf("pmap_enter: wired va %x %x\n", va,
1014 					hp->pmh_pte[j].low); /* XXX */
1015 				panic("pmap_enter: wired"); /* XXX */
1016 				MachTLBWriteRandom(va, npte);
1017 			}
1018 			goto next;
1019 		}
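		/*
		 * Miss: use a free slot if one is available; otherwise evict
		 * the entry in slot 1, shift slot 0 down to slot 1, and
		 * install the new mapping in slot 0.
		 */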
1020 		if (!hp->pmh_pte[0].high)
1021 			j = 0;
1022 		else if (!hp->pmh_pte[1].high)
1023 			j = 1;
1024 		else
1025 			j = -1;
1026 		if (j >= 0) {
1027 			pmap->pm_stats.resident_count++;
1028 			hp->pmh_pte[j].high = va;
1029 			hp->pmh_pte[j].low = npte;
1030 			MachTLBWriteRandom(va, npte);
1031 		} else {
1032 #ifdef DEBUG
1033 			enter_stats.cachehit++;
1034 #endif
1035 			if (!(hp->pmh_pte[1].low & PG_WIRED)) {
1036 				MachTLBFlushAddr(hp->pmh_pte[1].high);
1037 				pmap_remove_pv(pmap,
1038 					hp->pmh_pte[1].high & PG_FRAME,
1039 					hp->pmh_pte[1].low & PG_FRAME);
1040 				hp->pmh_pte[1] = hp->pmh_pte[0];
1041 				hp->pmh_pte[0].high = va;
1042 				hp->pmh_pte[0].low = npte;
1043 				MachTLBWriteRandom(va, npte);
1044 			} else {
1045 				/*
1046 				 * Don't replace wired entries, just update
1047 				 * the hardware TLB.
1048 				 * Bug: routines to flush the TLB won't know
1049 				 * that the entry is in the hardware.
1050 				 */
1051 				printf("pmap_enter: wired va %x %x\n", va,
1052 					hp->pmh_pte[1].low); /* XXX */
1053 				panic("pmap_enter: wired"); /* XXX */
1054 				MachTLBWriteRandom(va, npte);
1055 			}
1056 		}
1057 next:
1058 		va += NBPG;
1059 		npte += NBPG;
1060 	} while (--i != 0);
1061 }
1062 
1063 /*
1064  *	Routine:	pmap_change_wiring
1065  *	Function:	Change the wiring attribute for a map/virtual-address
1066  *			pair.
1067  *	In/out conditions:
1068  *			The mapping must already exist in the pmap.
1069  */
1070 void
1071 pmap_change_wiring(pmap, va, wired)
1072 	register pmap_t	pmap;
1073 	vm_offset_t va;
1074 	boolean_t wired;
1075 {
1076 	register pmap_hash_t hp;
1077 	u_int p;
1078 	register int i, j;
1079 
1080 #ifdef DEBUG
1081 	if (pmapdebug & PDB_FOLLOW)
1082 		printf("pmap_change_wiring(%x, %x, %x)\n", pmap, va, wired);
1083 #endif
1084 	if (pmap == NULL)
1085 		return;
1086 
1087 	p = wired ? PG_WIRED : 0;
1088 
1089 	/*
1090 	 * Don't need to flush the TLB since PG_WIRED is only in software.
1091 	 */
1092 	if (!pmap->pm_hash) {
1093 		register pt_entry_t *pte;
1094 
1095 		/* change entries in kernel pmap */
1096 #ifdef DIAGNOSTIC
1097 		if (va < VM_MIN_KERNEL_ADDRESS ||
1098 		    va >= VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
1099 			panic("pmap_change_wiring");
1100 #endif
1101 		pte = kvtopte(va);
1102 		i = pmaxpagesperpage;
1103 		if (!(pte->pt_entry & PG_WIRED) && p)
1104 			pmap->pm_stats.wired_count += i;
1105 		else if ((pte->pt_entry & PG_WIRED) && !p)
1106 			pmap->pm_stats.wired_count -= i;
1107 		do {
1108 			if (!(pte->pt_entry & PG_V))
1109 				continue;
1110 			pte->pt_entry = (pte->pt_entry & ~PG_WIRED) | p;
1111 			pte++;
1112 		} while (--i != 0);
1113 	} else if (pmap->pm_tlbpid >= 0 && pmap->pm_hash != zero_pmap_hash) {
1114 		i = pmaxpagesperpage;
1115 		va = (va & PG_FRAME) | (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
1116 		do {
1117 			hp = &pmap->pm_hash[PMAP_HASH(va)];
1118 			if (hp->pmh_pte[0].high == va)
1119 				j = 0;
1120 			else if (hp->pmh_pte[1].high == va)
1121 				j = 1;
1122 			else {
1123 				va += NBPG;
1124 				continue;
1125 			}
1126 			if (!(hp->pmh_pte[j].low & PG_WIRED) && p)
1127 				pmap->pm_stats.wired_count++;
1128 			else if ((hp->pmh_pte[j].low & PG_WIRED) && !p)
1129 				pmap->pm_stats.wired_count--;
1130 			hp->pmh_pte[j].low = (hp->pmh_pte[j].low & ~PG_WIRED) | p;
1131 			va += NBPG;
1132 		} while (--i != 0);
1133 	}
1134 }
1135 
1136 /*
1137  *	Routine:	pmap_extract
1138  *	Function:
1139  *		Extract the physical page address associated
1140  *		with the given map/virtual_address pair.
1141  */
1142 vm_offset_t
1143 pmap_extract(pmap, va)
1144 	register pmap_t	pmap;
1145 	vm_offset_t va;
1146 {
1147 	register vm_offset_t pa;
1148 	register pmap_hash_t hp;
1149 	register int i;
1150 
1151 #ifdef DEBUG
1152 	if (pmapdebug & PDB_FOLLOW)
1153 		printf("pmap_extract(%x, %x) -> ", pmap, va);
1154 #endif
1155 
1156 	if (!pmap->pm_hash) {
1157 #ifdef DIAGNOSTIC
1158 		if (va < VM_MIN_KERNEL_ADDRESS ||
1159 		    va >= VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
1160 			panic("pmap_extract");
1161 #endif
1162 		pa = kvtopte(va)->pt_entry & PG_FRAME;
1163 	} else if (pmap->pm_tlbpid >= 0) {
1164 		hp = &pmap->pm_hash[PMAP_HASH(va)];
1165 		va = (va & PG_FRAME) | (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
1166 		if (hp->pmh_pte[0].high == va)
1167 			pa = hp->pmh_pte[0].low & PG_FRAME;
1168 		else if (hp->pmh_pte[1].high == va)
1169 			pa = hp->pmh_pte[1].low & PG_FRAME;
1170 		else
1171 			pa = 0;
1172 	} else
1173 		pa = 0;
1174 
1175 #ifdef DEBUG
1176 	if (pmapdebug & PDB_FOLLOW)
1177 		printf("%x\n", pa);
1178 #endif
1179 	return (pa);
1180 }
1181 
1182 /*
1183  *	Copy the range specified by src_addr/len
1184  *	from the source map to the range dst_addr/len
1185  *	in the destination map.
1186  *
1187  *	This routine is only advisory and need not do anything.
1188  */
1189 void
1190 pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
1191 	pmap_t dst_pmap;
1192 	pmap_t src_pmap;
1193 	vm_offset_t dst_addr;
1194 	vm_size_t len;
1195 	vm_offset_t src_addr;
1196 {
1197 
1198 #ifdef DEBUG
1199 	if (pmapdebug & PDB_FOLLOW)
1200 		printf("pmap_copy(%x, %x, %x, %x, %x)\n",
1201 		       dst_pmap, src_pmap, dst_addr, len, src_addr);
1202 #endif
1203 }
1204 
1205 /*
1206  *	Require that all active physical maps contain no
1207  *	incorrect entries NOW.  [This update includes
1208  *	forcing updates of any address map caching.]
1209  *
1210  *	Generally used to ensure that a thread about
1211  *	to run will see a semantically correct world.
1212  */
1213 void
1214 pmap_update()
1215 {
1216 
1217 #ifdef DEBUG
1218 	if (pmapdebug & PDB_FOLLOW)
1219 		printf("pmap_update()\n");
1220 #endif
1221 }
1222 
1223 /*
1224  *	Routine:	pmap_collect
1225  *	Function:
1226  *		Garbage collects the physical map system for
1227  *		pages which are no longer used.
1228  *		Success need not be guaranteed -- that is, there
1229  *		may well be pages which are not referenced, but
1230  *		others may be collected.
1231  *	Usage:
1232  *		Called by the pageout daemon when pages are scarce.
1233  */
1234 void
1235 pmap_collect(pmap)
1236 	pmap_t pmap;
1237 {
1238 
1239 #ifdef DEBUG
1240 	if (pmapdebug & PDB_FOLLOW)
1241 		printf("pmap_collect(%x)\n", pmap);
1242 #endif
1243 }
1244 
1245 /*
1246  *	pmap_zero_page zeros the specified (machine independent)
1247  *	page.
1248  */
1249 void
1250 pmap_zero_page(phys)
1251 	vm_offset_t phys;
1252 {
1253 	register int *p, *end;
1254 
1255 #ifdef DEBUG
1256 	if (pmapdebug & PDB_FOLLOW)
1257 		printf("pmap_zero_page(%x)\n", phys);
1258 #endif
1259 	p = (int *)MACH_PHYS_TO_CACHED(phys);
1260 	end = p + PAGE_SIZE / sizeof(int);
1261 	do {
1262 		p[0] = 0;
1263 		p[1] = 0;
1264 		p[2] = 0;
1265 		p[3] = 0;
1266 		p += 4;
1267 	} while (p != end);
1268 }
1269 
1270 /*
1271  *	pmap_copy_page copies the specified (machine independent)
1272  *	page.
1273  */
1274 void
1275 pmap_copy_page(src, dst)
1276 	vm_offset_t src, dst;
1277 {
1278 	register int *s, *d, *end;
1279 	register int tmp0, tmp1, tmp2, tmp3;
1280 
1281 #ifdef DEBUG
1282 	if (pmapdebug & PDB_FOLLOW)
1283 		printf("pmap_copy_page(%x, %x)\n", src, dst);
1284 #endif
1285 	s = (int *)MACH_PHYS_TO_CACHED(src);
1286 	d = (int *)MACH_PHYS_TO_CACHED(dst);
1287 	end = s + PAGE_SIZE / sizeof(int);
1288 	do {
1289 		tmp0 = s[0];
1290 		tmp1 = s[1];
1291 		tmp2 = s[2];
1292 		tmp3 = s[3];
1293 		d[0] = tmp0;
1294 		d[1] = tmp1;
1295 		d[2] = tmp2;
1296 		d[3] = tmp3;
1297 		s += 4;
1298 		d += 4;
1299 	} while (s != end);
1300 }
1301 
1302 /*
1303  *	Routine:	pmap_pageable
1304  *	Function:
1305  *		Make the specified pages (by pmap, offset)
1306  *		pageable (or not) as requested.
1307  *
1308  *		A page which is not pageable may not take
1309  *		a fault; therefore, its page table entry
1310  *		must remain valid for the duration.
1311  *
1312  *		This routine is merely advisory; pmap_enter
1313  *		will specify that these pages are to be wired
1314  *		down (or not) as appropriate.
1315  */
1316 void
1317 pmap_pageable(pmap, sva, eva, pageable)
1318 	pmap_t		pmap;
1319 	vm_offset_t	sva, eva;
1320 	boolean_t	pageable;
1321 {
1322 
1323 #ifdef DEBUG
1324 	if (pmapdebug & PDB_FOLLOW)
1325 		printf("pmap_pageable(%x, %x, %x, %x)\n",
1326 		       pmap, sva, eva, pageable);
1327 #endif
1328 }
1329 
1330 /*
1331  *	Clear the modify bits on the specified physical page.
1332  */
1333 void
1334 pmap_clear_modify(pa)
1335 	vm_offset_t pa;
1336 {
1337 	pmap_hash_t hp;
1338 
1339 #ifdef DEBUG
1340 	if (pmapdebug & PDB_FOLLOW)
1341 		printf("pmap_clear_modify(%x)\n", pa);
1342 #endif
1343 #ifdef ATTR
1344 	pmap_attributes[atop(pa - KERNBASE)] &= ~PMAP_ATTR_MOD;
1345 #endif
1346 }
1347 
1348 /*
1349  *	pmap_clear_reference:
1350  *
1351  *	Clear the reference bit on the specified physical page.
1352  */
1353 void
1354 pmap_clear_reference(pa)
1355 	vm_offset_t pa;
1356 {
1357 
1358 #ifdef DEBUG
1359 	if (pmapdebug & PDB_FOLLOW)
1360 		printf("pmap_clear_reference(%x)\n", pa);
1361 #endif
1362 #ifdef ATTR
1363 	pmap_attributes[atop(pa - KERNBASE)] &= ~PMAP_ATTR_REF;
1364 #endif
1365 }
1366 
1367 /*
1368  *	pmap_is_referenced:
1369  *
1370  *	Return whether or not the specified physical page is referenced
1371  *	by any physical maps.
1372  */
1373 boolean_t
1374 pmap_is_referenced(pa)
1375 	vm_offset_t pa;
1376 {
1377 #ifdef ATTR
1378 	return (pmap_attributes[atop(pa - KERNBASE)] & PMAP_ATTR_REF);
1379 #else
1380 	return (FALSE);
1381 #endif
1382 }
1383 
1384 /*
1385  *	pmap_is_modified:
1386  *
1387  *	Return whether or not the specified physical page is modified
1388  *	by any physical maps.
1389  */
1390 boolean_t
1391 pmap_is_modified(pa)
1392 	vm_offset_t pa;
1393 {
1394 #ifdef ATTR
1395 	return (pmap_attributes[atop(pa - KERNBASE)] & PMAP_ATTR_MOD);
1396 #else
1397 	return (FALSE);
1398 #endif
1399 }
1400 
1401 vm_offset_t
1402 pmap_phys_address(ppn)
1403 	int ppn;
1404 {
1405 
1406 #ifdef DEBUG
1407 	if (pmapdebug & PDB_FOLLOW)
1408 		printf("pmap_phys_address(%x)\n", ppn);
1409 #endif
1410 	return (pmax_ptob(ppn));
1411 }
1412 
1413 /*
1414  * Miscellaneous support routines
1415  */
1416 
1417 /*
1418  * Allocate a hardware PID and return it.
1419  * Also, change the hardwired TLB entry to point to the current TLB cache.
1420  * This is called by swtch().
1421  */
1422 int
1423 pmap_alloc_tlbpid(p)
1424 	register struct proc *p;
1425 {
1426 	register pmap_t pmap;
1427 	register u_int i;
1428 	register int id;
1429 
1430 	pmap = &p->p_vmspace->vm_pmap;
1431 	if ((id = pmap->pm_tlbpid) >= 0) {
1432 		if (pmap->pm_flags & PM_MODIFIED) {
1433 			pmap->pm_flags &= ~PM_MODIFIED;
1434 			MachTLBFlushPID(id);
1435 		}
1436 		goto done;
1437 	}
1438 
1439 	if ((i = whichpids[0]) != 0xFFFFFFFF)
1440 		id = 0;
1441 	else if ((i = whichpids[1]) != 0xFFFFFFFF)
1442 		id = 32;
1443 	else {
1444 		register struct proc *q;
1445 		register pmap_t q_pmap;
1446 
1447 		/*
1448 		 * Have to find a tlbpid to recycle.
1449 		 * There is probably a better way to do this.
1450 		 */
1451 		for (q = (struct proc *)allproc; q != NULL; q = q->p_nxt) {
1452 			q_pmap = &q->p_vmspace->vm_pmap;
1453 			if ((id = q_pmap->pm_tlbpid) < 0)
1454 				continue;
1455 			if (q->p_stat != SRUN)
1456 				goto fnd;
1457 		}
1458 		if (id < 0)
1459 			panic("TLBPidAlloc");
1460 	fnd:
1461 		printf("pmap_alloc_tlbpid: recycle pid %d (%s) tlbpid %d\n",
1462 			q->p_pid, q->p_comm, id); /* XXX */
1463 		/*
1464 		 * Even though the virtual to physical mapping hasn't changed,
1465 		 * we must invalidate cache entries tagged with the recycled PID.
1466 		 */
1467 		if (q_pmap->pm_hash != zero_pmap_hash) {
1468 			register pmap_hash_t hp;
1469 			register int j;
1470 
1471 			hp = q_pmap->pm_hash;
1472 			for (i = 0; i < PMAP_HASH_NUM_ENTRIES; i++, hp++) {
1473 			    for (j = 0; j < 2; j++) {
1474 				if (!hp->pmh_pte[j].high)
1475 					continue;
1476 
1477 				if (hp->pmh_pte[j].low & PG_WIRED) {
1478 					printf("Clearing wired user entry! h %x l %x\n", hp->pmh_pte[j].high, hp->pmh_pte[j].low);
1479 					panic("pmap_alloc_tlbpid: wired");
1480 				}
1481 				pmap_remove_pv(q_pmap,
1482 					hp->pmh_pte[j].high & PG_FRAME,
1483 					hp->pmh_pte[j].low & PG_FRAME);
1484 				hp->pmh_pte[j].high = 0;
1485 				q_pmap->pm_stats.resident_count--;
1486 			    }
1487 			}
1488 		}
1489 		q_pmap->pm_tlbpid = -1;
1490 		MachTLBFlushPID(id);
1491 #ifdef DEBUG
1492 		remove_stats.pidflushes++;
1493 #endif
1494 		pmap->pm_tlbpid = id;
1495 		goto done;
1496 	}
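	/*
	 * Find the lowest clear bit in the chosen whichpids word; its bit
	 * index, added to the base id chosen above, becomes the new TLB PID.
	 */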
1497 	while (i & 1) {
1498 		i >>= 1;
1499 		id++;
1500 	}
1501 	whichpids[id >> 5] |= 1 << (id & 0x1F);
1502 	pmap->pm_tlbpid = id;
1503 done:
1504 	/*
1505 	 * Map in new TLB cache.
1506 	 */
1507 	if (pmap == cur_pmap)
1508 		return (id);
1509 	cur_pmap = pmap;
1510 	for (i = 0; i < PMAP_HASH_UPAGES; i++) {
1511 		MachTLBWriteIndexed(i + UPAGES,
1512 			(PMAP_HASH_UADDR + (i << PGSHIFT)) |
1513 				(id << VMMACH_TLB_PID_SHIFT),
1514 			pmap->pm_hash_ptes[i]);
1515 	}
1516 	return (id);
1517 }
1518 
1519 /*
1520  * Remove a physical to virtual address translation.
1521  */
1522 void
1523 pmap_remove_pv(pmap, va, pa)
1524 	pmap_t pmap;
1525 	vm_offset_t va, pa;
1526 {
1527 	register pv_entry_t pv, npv;
1528 	int s;
1529 
1530 #ifdef DEBUG
1531 	if (pmapdebug & PDB_FOLLOW)
1532 		printf("pmap_remove_pv(%x, %x, %x)\n", pmap, va, pa);
1533 #endif
1534 	/*
1535 	 * Remove page from the PV table (raise IPL since we
1536 	 * may be called at interrupt time).
1537 	 */
1538 	if (!IS_VM_PHYSADDR(pa))
1539 		return;
1540 	pv = pa_to_pvh(pa);
1541 	s = splimp();
1542 	/*
1543 	 * If it is the first entry on the list, it is actually
1544 	 * in the header and we must copy the following entry up
1545 	 * to the header.  Otherwise we must search the list for
1546 	 * the entry.  In either case we free the now unused entry.
1547 	 */
1548 	if (pmap == pv->pv_pmap && va == pv->pv_va) {
1549 		npv = pv->pv_next;
1550 		if (npv) {
1551 			*pv = *npv;
1552 			free((caddr_t)npv, M_VMPVENT);
1553 		} else
1554 			pv->pv_pmap = NULL;
1555 #ifdef DEBUG
1556 		remove_stats.pvfirst++;
1557 #endif
1558 	} else {
1559 		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
1560 #ifdef DEBUG
1561 			remove_stats.pvsearch++;
1562 #endif
1563 			if (pmap == npv->pv_pmap && va == npv->pv_va)
1564 				goto fnd;
1565 		}
1566 #ifdef DIAGNOSTIC
1567 		printf("pmap_remove_pv(%x, %x, %x) not found\n", pmap, va, pa);
1568 		panic("pmap_remove_pv");
1569 #endif
1570 	fnd:
1571 		pv->pv_next = npv->pv_next;
1572 		free((caddr_t)npv, M_VMPVENT);
1573 	}
1574 	splx(s);
1575 }
1576 
1577 #ifdef DEBUG
1578 pmap_print(pmap)
1579 	pmap_t pmap;
1580 {
1581 	register pmap_hash_t hp;
1582 	register int i, j;
1583 
1584 	printf("\tpmap_print(%x)\n", pmap);
1585 
1586 	if (pmap->pm_hash == zero_pmap_hash) {
1587 		printf("pm_hash == zero\n");
1588 		return;
1589 	}
1590 	if (pmap->pm_hash == (pmap_hash_t)0) {
1591 		printf("pm_hash == kernel\n");
1592 		return;
1593 	}
1594 	hp = pmap->pm_hash;
1595 	for (i = 0; i < PMAP_HASH_NUM_ENTRIES; i++, hp++) {
1596 	    for (j = 0; j < 2; j++) {
1597 		if (!hp->pmh_pte[j].high)
1598 			continue;
1599 		printf("%d: hi %x low %x\n", i, hp->pmh_pte[j].high, hp->pmh_pte[j].low);
1600 	    }
1601 	}
1602 }
1603 #endif
1604