xref: /netbsd/sys/arch/hppa/hppa/pmap.c (revision c4a72b64)
1 /*	$NetBSD: pmap.c,v 1.5 2002/08/25 20:19:59 fredette Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Matthew Fredette.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *      This product includes software developed by the NetBSD
21  *      Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*	$OpenBSD: pmap.c,v 1.46 2001/07/25 13:25:31 art Exp $	*/
40 
41 /*
42  * Copyright (c) 1998-2001 Michael Shalayeff
43  * All rights reserved.
44  *
45  * Redistribution and use in source and binary forms, with or without
46  * modification, are permitted provided that the following conditions
47  * are met:
48  * 1. Redistributions of source code must retain the above copyright
49  *    notice, this list of conditions and the following disclaimer.
50  * 2. Redistributions in binary form must reproduce the above copyright
51  *    notice, this list of conditions and the following disclaimer in the
52  *    documentation and/or other materials provided with the distribution.
53  * 3. All advertising materials mentioning features or use of this software
54  *    must display the following acknowledgement:
55  *	This product includes software developed by Michael Shalayeff.
56  * 4. The name of the author may not be used to endorse or promote products
57  *    derived from this software without specific prior written permission.
58  *
59  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
60  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
61  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
62  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
63  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
64  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
65  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
66  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
67  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
68  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
69  */
70 /*
71  * Copyright 1996 1995 by Open Software Foundation, Inc.
72  *              All Rights Reserved
73  *
74  * Permission to use, copy, modify, and distribute this software and
75  * its documentation for any purpose and without fee is hereby granted,
76  * provided that the above copyright notice appears in all copies and
77  * that both the copyright notice and this permission notice appear in
78  * supporting documentation.
79  *
80  * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
81  * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
82  * FOR A PARTICULAR PURPOSE.
83  *
84  * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
85  * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
86  * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
87  * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
88  * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
89  */
90 /*
91  * Mach Operating System
92  * Copyright (c) 1990,1991,1992,1993,1994 The University of Utah and
93  * the Computer Systems Laboratory (CSL).
94  * Copyright (c) 1991,1987 Carnegie Mellon University.
95  * All rights reserved.
96  *
97  * Permission to use, copy, modify and distribute this software and its
98  * documentation is hereby granted, provided that both the copyright
99  * notice and this permission notice appear in all copies of the
100  * software, derivative works or modified versions, and any portions
101  * thereof, and that both notices appear in supporting documentation,
102  * and that all advertising materials mentioning features or use of
103  * this software display the following acknowledgement: ``This product
104  * includes software developed by the Computer Systems Laboratory at
105  * the University of Utah.''
106  *
107  * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
108  * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
109  * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
110  * THIS SOFTWARE.
111  *
112  * CSL requests users of this software to return to csl-dist@cs.utah.edu any
113  * improvements that they make and grant CSL redistribution rights.
114  *
115  * Carnegie Mellon requests users of this software to return to
116  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
117  *  School of Computer Science
118  *  Carnegie Mellon University
119  *  Pittsburgh PA 15213-3890
120  * any improvements or extensions that they make and grant Carnegie Mellon
121  * the rights to redistribute these changes.
122  *
123  * 	Utah $Hdr: pmap.c 1.49 94/12/15$
124  *	Author: Mike Hibler, Bob Wheeler, University of Utah CSL, 10/90
125  */
126 /*
127  *	Manages physical address maps for hppa.
128  *
129  *	In addition to hardware address maps, this
130  *	module is called upon to provide software-use-only
131  *	maps which may or may not be stored in the same
132  *	form as hardware maps.  These pseudo-maps are
133  *	used to store intermediate results from copy
134  *	operations to and from address spaces.
135  *
136  *	Since the information managed by this module is
137  *	also stored by the logical address mapping module,
138  *	this module may throw away valid virtual-to-physical
139  *	mappings at almost any time.  However, invalidations
140  *	of virtual-to-physical mappings must be done as
141  *	requested.
142  *
143  *	In order to cope with hardware architectures which
144  *	make virtual-to-physical map invalidates expensive,
145  *	this module may delay invalidate or reduced protection
146  *	operations until such time as they are actually
147  *	necessary.  This module is given full information to
148  *	when physical maps must be made correct.
149  *
150  */
151 /*
152  * CAVEATS:
153  *
154  *	PAGE_SIZE must equal NBPG
155  *	Needs more work for MP support
156  *	page maps are stored as linear linked lists, some
157  *		improvement may be achieved should we use smth else
158  *	protection id (pid) allocation should be done in a pid_t fashion
159  *		(maybe just use the pid itself)
160  *	some ppl say, block tlb entries should be maintained somewhere in uvm
161  *		and be ready for reloads in the fault handler.
162  *	usage of __inline grows the code size by 100%, but hopefully
163  *		makes it faster as well, since the functions are actually
164  *		very small.
165  *		retail:  8.1k -> 15.1K
166  *		debug:  12.2k -> 22.1K
167  *
168  * References:
169  * 1. PA7100LC ERS, Hewlett-Packard, March 30 1999, Public version 1.0
170  * 2. PA7300LC ERS, Hewlett-Packard, March 18 1996, Version 1.0
171  *
172  */
173 
174 #include <sys/param.h>
175 #include <sys/systm.h>
176 #include <sys/lock.h>
177 #include <sys/malloc.h>
178 #include <sys/user.h>
179 #include <sys/proc.h>
180 
181 #include <uvm/uvm.h>
182 
183 #include <machine/reg.h>
184 #include <machine/psl.h>
185 #include <machine/cpu.h>
186 #include <machine/pmap.h>
187 #include <machine/pte.h>
188 #include <machine/cpufunc.h>
189 
190 #include <hppa/hppa/hpt.h>
191 #include <hppa/hppa/machdep.h>
192 
193 #define static	/**/
194 #define	__inline /* */
195 
196 #ifdef PMAPDEBUG
197 #define	PDB_INIT	0x00000002
198 #define	PDB_ENTER	0x00000004
199 #define	PDB_REMOVE	0x00000008
200 #define	PDB_KENTER	0x00000010
201 #define	PDB_PMAP	0x00000020
202 #define	PDB_CACHE	0x00000040
203 #define	PDB_BITS	0x00000080
204 #define	PDB_EXTRACT	0x00000100
205 #define	PDB_PROTECT	0x00000200
206 #define	PDB_PV_ALLOC	0x00000400
207 #define	PDB_PV_ENTER	0x00000800
208 #define	PDB_PV_REMOVE	0x00001000
209 #define	PDB_PV_FIND_VA	0x00002000
210 #define	PDB_WIRING	0x00004000
211 #define	PDB_ZERO	0x00008000
212 #define	PDB_STEAL	0x00010000
213 #define	PDB_COPY	0x00020000
214 int pmapdebug = 0
215 #if 1
216 	| PDB_ENTER
217 	| PDB_REMOVE
218 	| PDB_KENTER
219 	| PDB_BITS
220 	| PDB_PROTECT
221 	| PDB_EXTRACT
222 	| PDB_WIRING
223 	| PDB_ZERO
224 	| PDB_STEAL
225 	| PDB_COPY
226 #endif
227 	;
228 #define PMAP_PRINTF_MASK(m,v,x) do {	\
229   if ((pmapdebug & (m)) == (v)) {	\
230     printf("%s", __FUNCTION__);		\
231     printf x;				\
232   }					\
233 } while(/* CONSTCOND */ 0)
234 #else
235 #define PMAP_PRINTF_MASK(m,v,x) do { } while(/* CONSTCOND */ 0)
236 #endif
237 #define PMAP_PRINTF(v,x) PMAP_PRINTF_MASK(v,v,x)
238 
239 vaddr_t	virtual_steal, virtual_start, virtual_end;
240 
241 /* These two virtual pages are available for copying and zeroing. */
242 static vaddr_t tmp_vpages[2];
243 
244 /* Free list of PV entries. */
245 static struct pv_entry *pv_free_list;
246 
247 /* This is an array of struct pv_head, one per physical page. */
248 static struct pv_head *pv_head_tbl;
249 
250 /*
251  * This is a bitmap of page-is-aliased bits.
252  * The magic 5 is log2(sizeof(u_int) * 8), and the magic 31 is 2^5 - 1.
253  */
254 static u_int *page_aliased_bitmap;
255 #define _PAGE_ALIASED_WORD(pa) page_aliased_bitmap[((pa) >> PGSHIFT) >> 5]
256 #define _PAGE_ALIASED_BIT(pa) (1 << (((pa) >> PGSHIFT) & 31))
257 #define PAGE_IS_ALIASED(pa) (_PAGE_ALIASED_WORD(pa) & _PAGE_ALIASED_BIT(pa))
258 
259 struct pmap	kernel_pmap_store;
260 pmap_t		kernel_pmap;
261 boolean_t	pmap_initialized = FALSE;
262 
263 TAILQ_HEAD(, pmap)	pmap_freelist;	/* list of free pmaps */
264 u_int pmap_nfree;
265 struct simplelock pmap_freelock;	/* and lock */
266 
267 struct simplelock pmap_lock;	/* XXX this is all broken */
268 struct simplelock sid_pid_lock;	/* pids */
269 
270 u_int	pages_per_vm_page;
271 u_int	pid_counter;
272 
273 #ifdef PMAPDEBUG
274 void pmap_hptdump __P((void));
275 #endif
276 
277 u_int	kern_prot[8], user_prot[8];
278 
279 vaddr_t	hpt_base;
280 vsize_t	hpt_mask;
281 
282 #define	pmap_sid(pmap, va) \
283 	(((va & 0xc0000000) != 0xc0000000)? pmap->pmap_space : HPPA_SID_KERNEL)
284 
285 /*
286  * Page 3-6 of the "PA-RISC 1.1 Architecture and Instruction Set
287  * Reference Manual" (HP part number 09740-90039) defines equivalent
288  * and non-equivalent virtual addresses in the cache.
289  *
290  * This macro evaluates to TRUE iff the two space/virtual address
291  * combinations are non-equivalent aliases, and therefore will find
292  * two different locations in the cache.
293  *
294  * NB: currently, the CPU-specific desidhash() functions disable the
295  * use of the space in all cache hashing functions.  This means that
296  * this macro definition is stricter than it has to be (because it
297  * takes space into account), but one day cache space hashing should
298  * be re-enabled.  Cache space hashing should yield better performance
299  * through better utilization of the cache, assuming that most aliasing
300  * is the read-only kind, which we do allow in the cache.
301  */
302 #define NON_EQUIVALENT_ALIAS(sp1, va1, sp2, va2) \
303   (((((va1) ^ (va2)) & ~HPPA_PGAMASK) != 0) || \
304    ((((sp1) ^ (sp2)) & ~HPPA_SPAMASK) != 0))
305 
306 /* Prototypes. */
307 void __pmap_pv_update __P((paddr_t, struct pv_entry *, u_int, u_int));
308 static __inline void pmap_pv_remove __P((struct pv_entry *));
309 
310 /*
311  * Given a directly-mapped region, this makes pv_entries out of it and
312  * adds them to the free list.
313  */
314 static __inline void pmap_pv_add __P((vaddr_t, vaddr_t));
315 static __inline void
316 pmap_pv_add(vaddr_t pv_start, vaddr_t pv_end)
317 {
318 	struct pv_entry *pv;
319 	int s;
320 
321 	/* Align pv_start, then add the new pv_entries. */
322 	pv_start = (pv_start + sizeof(*pv) - 1) & ~(sizeof(*pv) - 1);
323 	pv = (struct pv_entry *) pv_start;
324 	s = splvm();
325 	while ((vaddr_t)(pv + 1) <= pv_end) {
326 		pv->pv_next = pv_free_list;
327 		pv_free_list = pv;
328 		pv++;
329 	}
330 	splx(s);
331 
332 	PMAP_PRINTF(PDB_INIT, (": %d pv_entries @ %x allocated\n",
333 		    (pv - (struct pv_entry *) pv_start), (u_int)pv_start));
334 }
335 
336 /*
337  * This allocates and returns a new struct pv_entry.
338  *
339  * If we run out of preallocated struct pv_entries, we have to forcibly
340  * free one.   malloc() isn't an option, because a) we'll probably end
341  * up back here anyways when malloc() maps what it's trying to return to
342  * us, and b) even if malloc() did succeed, the TLB fault handlers run
343  * in physical mode and thus require that all pv_entries be directly
344  * mapped, a quality unlikely for malloc()-returned memory.
345  */
346 static __inline struct pv_entry *pmap_pv_alloc __P((void));
347 static __inline struct pv_entry *
348 pmap_pv_alloc(void)
349 {
350 	struct pv_entry *pv, *pv_fallback;
351 	u_int hpt_index_first, hpt_index, hpt_size;
352 	struct hpt_entry *hpt;
353 
354 	pv = pv_free_list;
355 	if (pv == NULL) {
356 		/*
357 		 * We need to find a struct pv_entry to forcibly
358 		 * free.  It cannot be wired.  We prefer to free
359 		 * mappings that aren't marked as referenced.  We
360 		 * search the HPT for an entry to free, starting
361 		 * at a semirandom HPT index determined by the
362 		 * current value of the interval timer.
363 		 */
364 		hpt_size = hpt_mask / sizeof(*hpt);
365 		mfctl(CR_ITMR, hpt_index_first);
366 		hpt_index = hpt_index_first = hpt_index_first & hpt_size;
367 		pv_fallback = NULL;
368 		do {
369 			hpt = ((struct hpt_entry *) hpt_base) + hpt_index;
370 			for (pv = hpt->hpt_entry;
371 			     pv != NULL;
372 			     pv = pv->pv_hash) {
373 				if (!(pv->pv_tlbprot & TLB_WIRED)) {
374 					if (!(pv->pv_tlbprot & TLB_REF))
375 						break;
376 					pv_fallback = pv;
377 				}
378 			}
379 			if (pv != NULL)
380 				break;
381 			if (pv_fallback != NULL) {
382 				pv = pv_fallback;
383 				break;
384 			}
385 			hpt_index = (hpt_index + 1) & hpt_size;
386 		} while (hpt_index != hpt_index_first);
387 
388 		/* Remove the mapping. */
389 		if (pv != NULL) {
390 			KASSERT(pv->pv_pmap->pmap_stats.resident_count > 0);
391 			pv->pv_pmap->pmap_stats.resident_count--;
392 			pmap_pv_remove(pv);
393 			pv = pv_free_list;
394 		}
395 
396 		if (pv == NULL)
397 			panic("out of pv_entries");
398 
399 	}
400 	pv_free_list = pv->pv_next;
401 	pv->pv_next = NULL;
402 
403 	PMAP_PRINTF(PDB_PV_ALLOC, ("() = %p\n", pv));
404 	return pv;
405 }
406 
407 /*
408  * Given a struct pv_entry allocated by pmap_pv_alloc, this frees it.
409  */
410 static __inline void pmap_pv_free __P((struct pv_entry *));
411 static __inline void
412 pmap_pv_free(struct pv_entry *pv)
413 {
414 	PMAP_PRINTF(PDB_PV_ALLOC, ("(%p)\n", pv));
415 
416 	pv->pv_next = pv_free_list;
417 	pv_free_list = pv;
418 }
419 
420 /*
421  * Given a VA, this hashes it into an HPT index.
422  *
423  * This hash function is the one used by the hardware TLB filler on
424  * the 7100LC, to index the hardware page table (HPT), which is sort
425  * of a cache of TLB entries.
426  *
427  * On other CPUs, locore.S has a software TLB filler that does exactly
428  * the same thing, right down to using this same hash function.
429  *
430  * This HPT is also used as a general VA->PA mapping store, with
431  * struct pv_entry chains hanging off of the HPT entries.
432  */
433 static __inline struct hpt_entry *pmap_hpt_hash __P((pa_space_t, vaddr_t));
434 static __inline struct hpt_entry *
435 pmap_hpt_hash(pa_space_t sp, vaddr_t va)
436 {
437 	struct hpt_entry *hpt;
438 	__asm __volatile (
439 		"extru	%2, 23, 20, %%r22\n\t"	/* r22 = (va >> 8) */
440 		"zdep	%1, 22, 16, %%r23\n\t"	/* r23 = (sp << 9) */
441 		"dep	%%r0, 31, 4, %%r22\n\t"	/* r22 &= ~0xf */
442 		"xor	%%r22,%%r23, %%r23\n\t"	/* r23 ^= r22 */
443 		"mfctl	%%cr24, %%r22\n\t"	/* r22 = sizeof(HPT)-1 */
444 		"and	%%r22,%%r23, %%r23\n\t"	/* r23 &= r22 */
445 		"mfctl	%%cr25, %%r22\n\t"	/* r22 = addr of HPT table */
446 		"or	%%r23, %%r22, %0"	/* %0 = HPT entry */
447 		: "=r" (hpt) : "r" (sp), "r" (va) : "r22", "r23");
448 	return hpt;
449 }
450 
451 /*
452  * Given a PA, returns the table offset for it.
453  */
454 static __inline int pmap_table_find_pa __P((paddr_t));
455 static __inline int
456 pmap_table_find_pa(paddr_t pa)
457 {
458 	int off;
459 
460 	off = atop(pa);
461 	return (off < totalphysmem) ? off : -1;
462 }
463 
464 /*
465  * Given a PA, returns the first mapping for it.
466  */
467 static __inline struct pv_entry *pmap_pv_find_pa __P((paddr_t));
468 static __inline struct pv_entry *
469 pmap_pv_find_pa(paddr_t pa)
470 {
471 	int table_off;
472 
473 	table_off = pmap_table_find_pa(pa);
474 	KASSERT(table_off >= 0);
475 	return pv_head_tbl[table_off].pv_head_pvs;
476 }
477 
478 /*
479  * Given a VA, this finds any mapping for it.
480  */
481 static __inline struct pv_entry *pmap_pv_find_va __P((pa_space_t, vaddr_t));
482 static __inline struct pv_entry *
483 pmap_pv_find_va(pa_space_t space, vaddr_t va)
484 {
485 	struct pv_entry *pv = pmap_hpt_hash(space, va)->hpt_entry;
486 
487 	while(pv && (pv->pv_va != va || pv->pv_space != space))
488 		pv = pv->pv_hash;
489 
490 	PMAP_PRINTF(PDB_PV_FIND_VA, ("(0x%x:%p) = %p\n",
491 					  space, (caddr_t)va, pv));
492 	return pv;
493 }
494 
495 /*
496  * Given a page's PA, checks for non-equivalent aliasing,
497  * and stores and returns the result.
498  */
499 static int pmap_pv_check_alias __P((paddr_t));
500 static int
501 pmap_pv_check_alias(paddr_t pa)
502 {
503 	struct pv_entry *pv_outer, *pv;
504 	pa_space_t space;
505 	vaddr_t va;
506 	int aliased;
507 	u_int *aliased_word, aliased_bit;
508 
509 	/* By default we find no aliasing. */
510 	aliased = FALSE;
511 
512 	/*
513 	 * We should never get called on I/O pages.
514 	 */
515 	KASSERT(pa < HPPA_IOSPACE);
516 
517 	/*
518 	 * Make an outer loop over the mappings, checking
519 	 * each following inner mapping for non-equivalent
520 	 * aliasing.  If the non-equivalent alias relation
521 	 * is deemed transitive, this outer loop only needs
522 	 * one iteration.
523 	 */
524 	for (pv_outer = pmap_pv_find_pa(pa);
525 	     pv_outer != NULL;
526 	     pv_outer = pv_outer->pv_next) {
527 
528 		/* Load this outer mapping's space and address. */
529 		space = pv_outer->pv_space;
530 		va = pv_outer->pv_va;
531 
532 		/* Do the inner loop. */
533 		for (pv = pv_outer->pv_next;
534 		     pv != NULL;
535 		     pv = pv->pv_next) {
536 			if (NON_EQUIVALENT_ALIAS(space, va,
537 				pv->pv_space, pv->pv_va)) {
538 				aliased = TRUE;
539 				break;
540 			}
541 		}
542 
543 #ifndef NON_EQUIVALENT_ALIAS_TRANSITIVE
544 		if (aliased)
545 #endif /* !NON_EQUIVALENT_ALIAS_TRANSITIVE */
546 			break;
547 	}
548 
549 	/* Store and return the result. */
550 	aliased_word = &_PAGE_ALIASED_WORD(pa);
551 	aliased_bit = _PAGE_ALIASED_BIT(pa);
552 	*aliased_word = (*aliased_word & ~aliased_bit) |
553 		(aliased ? aliased_bit : 0);
554 	return aliased;
555 }
556 
557 /*
558  * Given a VA->PA mapping and tlbprot bits to clear and set,
559  * this flushes the mapping from the TLB and cache, and changes
560  * the protection accordingly.  This is used when a mapping is
561  * changing.
562  */
563 static __inline void _pmap_pv_update __P((paddr_t, struct pv_entry *,
564 					  u_int, u_int));
565 static __inline void
566 _pmap_pv_update(paddr_t pa, struct pv_entry *pv,
567 		u_int tlbprot_clear, u_int tlbprot_set)
568 {
569 	struct pv_entry *ppv;
570 	int no_rw_alias;
571 
572 	/*
573 	 * We should never get called on I/O pages.
574 	 */
575 	KASSERT(pa < HPPA_IOSPACE);
576 
577 	/*
578 	 * If the TLB protection of this mapping is changing,
579 	 * check for a change in the no read-write alias state
580 	 * of the page.
581 	 */
582 	KASSERT((tlbprot_clear & TLB_AR_MASK) == 0 ||
583 		(tlbprot_clear & TLB_AR_MASK) == TLB_AR_MASK);
584 	if (tlbprot_clear & TLB_AR_MASK) {
585 
586 		/*
587 		 * Assume that no read-write aliasing
588 		 * exists.  It does exist if this page is
589 		 * aliased and any mapping is writable.
590 		 */
591 		no_rw_alias = TLB_NO_RW_ALIAS;
592 		if (PAGE_IS_ALIASED(pa)) {
593 			for (ppv = pmap_pv_find_pa(pa);
594 			     ppv != NULL;
595 			     ppv = ppv->pv_next) {
596 				if (TLB_AR_WRITABLE(ppv == pv ?
597 						    tlbprot_set :
598 						    ppv->pv_tlbprot)) {
599 					no_rw_alias = 0;
600 					break;
601 				}
602 			}
603 		}
604 
605 		/* Note if the no read-write alias state has changed. */
606 		if ((pv->pv_tlbprot & TLB_NO_RW_ALIAS) ^ no_rw_alias) {
607 			tlbprot_clear |= TLB_NO_RW_ALIAS;
608 			tlbprot_set |= no_rw_alias;
609 		}
610 	}
611 
612 	/*
613 	 * Now call our asm helper function.  At the very least,
614 	 * this will flush out the requested mapping and change
615 	 * its protection.  If the changes touch any of TLB_REF,
616 	 * TLB_DIRTY, and TLB_NO_RW_ALIAS, all mappings of the
617 	 * page will be flushed and changed.
618 	 */
619 	__pmap_pv_update(pa, pv, tlbprot_clear, tlbprot_set);
620 }
621 #define pmap_pv_update(pv, tc, ts) \
622 	_pmap_pv_update(tlbptob((pv)->pv_tlbpage), pv, tc, ts)
623 
624 /*
625  * Given a pmap, a VA, a PA, and a TLB protection, this enters
626  * a new mapping and returns the new struct pv_entry.
627  */
628 static __inline struct pv_entry *pmap_pv_enter __P((pmap_t, pa_space_t, vaddr_t, paddr_t, u_int));
629 static __inline struct pv_entry *
630 pmap_pv_enter(pmap_t pmap, pa_space_t space, vaddr_t va,
631 	      paddr_t pa, u_int tlbprot)
632 {
633 	struct hpt_entry *hpt = pmap_hpt_hash(space, va);
634 	int table_off;
635 	struct pv_head *hpv;
636 	struct pv_entry *pv, *pv_other;
637 
638 #ifdef DIAGNOSTIC
639 	/* Make sure this VA isn't already entered. */
640 	for (pv = hpt->hpt_entry; pv != NULL; pv = pv->pv_hash)
641 		if (pv->pv_va == va && pv->pv_space == space)
642 			panic("pmap_pv_enter: VA already entered");
643 #endif /* DIAGNOSTIC */
644 
645 	/*
646 	 * Allocate a new pv_entry, fill it, and link it into the HPT.
647 	 */
648 	pv = pmap_pv_alloc();
649 	pv->pv_va = va;
650 	pv->pv_pmap = pmap;
651 	pv->pv_space = space;
652 	pv->pv_tlbprot = tlbprot;
653 	pv->pv_tlbpage = tlbbtop(pa);
654 	pv->pv_hpt = hpt;
655 	pv->pv_hash = hpt->hpt_entry;
656 	hpt->hpt_entry = pv;
657 
658 	/*
659 	 * If this mapping is for I/O space, mark the mapping
660 	 * uncacheable.  (This is fine even on CPUs that don't
661 	 * support the U-bit; these CPUs don't cache references
662 	 * to I/O space.)  Also mark this mapping as having
663 	 * no read/write aliasing, and we're done - we don't
664 	 * keep PA->VA lists for I/O space.
665 	 */
666 	if (pa >= HPPA_IOSPACE) {
667 		KASSERT(tlbprot & TLB_UNMANAGED);
668 		pv->pv_tlbprot |= TLB_UNCACHEABLE | TLB_NO_RW_ALIAS;
669 		return pv;
670 	}
671 
672 	/* Get the head of the PA->VA translation list. */
673 	table_off = pmap_table_find_pa(pa);
674 	KASSERT(table_off >= 0);
675 	hpv = pv_head_tbl + table_off;
676 
677 #ifdef DIAGNOSTIC
678 	/* Make sure this VA isn't already entered. */
679 	for (pv_other = hpv->pv_head_pvs;
680 	     pv_other != NULL;
681 	     pv_other = pv_other->pv_next)
682 		if (pmap == pv_other->pv_pmap && va == pv_other->pv_va)
683 			panic("pmap_pv_enter: VA already in pv_tab");
684 #endif /* DIAGNOSTIC */
685 
686 	/*
687 	 * Link this mapping into the PA->VA list.
688 	 */
689 	pv_other = hpv->pv_head_pvs;
690 	pv->pv_next = pv_other;
691 	hpv->pv_head_pvs = pv;
692 
693 	/*
694 	 * If there are no other mappings of this page, this
695 	 * mapping has no read/write aliasing.  Otherwise, give
696 	 * this mapping the same TLB_NO_RW_ALIAS status as the
697 	 * other mapping (all mappings of the same page must
698 	 * always be marked the same).
699 	 */
700 	pv->pv_tlbprot |= (pv_other == NULL ?
701 			   TLB_NO_RW_ALIAS :
702 			   (pv_other->pv_tlbprot & TLB_NO_RW_ALIAS));
703 
704 	/* Check for read-write aliasing. */
705 	if (!PAGE_IS_ALIASED(pa))
706 		pmap_pv_check_alias(pa);
707 	_pmap_pv_update(pa, pv, TLB_AR_MASK, tlbprot & TLB_AR_MASK);
708 
709 	return pv;
710 }
711 
712 /*
713  * Given a particular VA->PA mapping, this removes it.
714  */
715 static __inline void
716 pmap_pv_remove(struct pv_entry *pv)
717 {
718 	paddr_t pa = tlbptob(pv->pv_tlbpage);
719 	int table_off;
720 	struct pv_head *hpv;
721 	struct pv_entry **_pv;
722 
723 	PMAP_PRINTF(PDB_PV_REMOVE, ("(%p)\n", pv));
724 
725 	/* Unlink this pv_entry from the HPT. */
726 	_pv = &pv->pv_hpt->hpt_entry;
727 	while (*_pv != pv) {
728 		KASSERT(*_pv != NULL);
729 		_pv = &(*_pv)->pv_hash;
730 	}
731 	*_pv = pv->pv_hash;
732 
733 	/*
734 	 * If this mapping is for I/O space, simply flush the
735 	 * old mapping, free it, and we're done.
736 	 */
737 	if (pa >= HPPA_IOSPACE) {
738 		__pmap_pv_update(pa, pv, 0, 0);
739 		pmap_pv_free(pv);
740 		return;
741 	}
742 
743 	/* Get the head of the PA->VA translation list. */
744 	table_off = pmap_table_find_pa(pa);
745 	KASSERT(table_off >= 0);
746 	hpv = pv_head_tbl + table_off;
747 
748 	/* Unlink this pv_entry from the PA->VA translation list. */
749 	_pv = &hpv->pv_head_pvs;
750 	while (*_pv != pv) {
751 		KASSERT(*_pv != NULL);
752 		_pv = &(*_pv)->pv_next;
753 	}
754 	*_pv = pv->pv_next;
755 
756 	/*
757 	 * Check for read-write aliasing.  This will also flush
758 	 * the old mapping.
759 	 */
760 	if (PAGE_IS_ALIASED(pa))
761 		pmap_pv_check_alias(pa);
762 	_pmap_pv_update(pa, pv, TLB_AR_MASK, TLB_AR_KR);
763 
764 	/* Free this mapping. */
765 	pmap_pv_free(pv);
766 }
767 
768 /*
769  *	Bootstrap the system enough to run with virtual memory.
770  *	Map the kernel's code and data, and allocate the system page table.
771  *	Called with mapping OFF.
772  *
773  *	Parameters:
774  *	vstart	PA of first available physical page
775  *	vend	PA of last available physical page
776  */
777 void
778 pmap_bootstrap(vstart, vend)
779 	vaddr_t *vstart;
780 	vaddr_t *vend;
781 {
782 	vaddr_t addr;
783 	vsize_t size;
784 	vaddr_t pv_region;
785 	struct hpt_entry *hptp;
786 #define BTLB_SET_SIZE 16
787 	vaddr_t btlb_entry_start[BTLB_SET_SIZE];
788 	vsize_t btlb_entry_size[BTLB_SET_SIZE];
789 	int btlb_entry_vm_prot[BTLB_SET_SIZE];
790 	int btlb_i, btlb_j;
791 	vsize_t btlb_entry_min, btlb_entry_max, btlb_entry_got;
792 	extern int kernel_text, etext;
793 	vaddr_t kernel_data;
794 	paddr_t phys_start, phys_end;
795 
796 	uvm_setpagesize();
797 
798 	pages_per_vm_page = PAGE_SIZE / NBPG;
799 	/* XXX for now */
800 	if (pages_per_vm_page != 1)
801 		panic("HPPA page != VM page");
802 
803 	kern_prot[VM_PROT_NONE | VM_PROT_NONE  | VM_PROT_NONE]    =TLB_AR_NA;
804 	kern_prot[VM_PROT_READ | VM_PROT_NONE  | VM_PROT_NONE]    =TLB_AR_KR;
805 	kern_prot[VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE]    =TLB_AR_KRW;
806 	kern_prot[VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE]    =TLB_AR_KRW;
807 	kern_prot[VM_PROT_NONE | VM_PROT_NONE  | VM_PROT_EXECUTE] =TLB_AR_KRX;
808 	kern_prot[VM_PROT_READ | VM_PROT_NONE  | VM_PROT_EXECUTE] =TLB_AR_KRX;
809 	kern_prot[VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE] =TLB_AR_KRWX;
810 	kern_prot[VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE] =TLB_AR_KRWX;
811 
812 	user_prot[VM_PROT_NONE | VM_PROT_NONE  | VM_PROT_NONE]    =TLB_AR_NA;
813 	user_prot[VM_PROT_READ | VM_PROT_NONE  | VM_PROT_NONE]    =TLB_AR_UR;
814 	user_prot[VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE]    =TLB_AR_URW;
815 	user_prot[VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE]    =TLB_AR_URW;
816 	user_prot[VM_PROT_NONE | VM_PROT_NONE  | VM_PROT_EXECUTE] =TLB_AR_URX;
817 	user_prot[VM_PROT_READ | VM_PROT_NONE  | VM_PROT_EXECUTE] =TLB_AR_URX;
818 	user_prot[VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE] =TLB_AR_URWX;
819 	user_prot[VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE] =TLB_AR_URWX;
820 
821 	/*
822 	 * Initialize kernel pmap
823 	 */
824 	kernel_pmap = &kernel_pmap_store;
825 #if	NCPUS > 1
826 	lock_init(&pmap_lock, FALSE, ETAP_VM_PMAP_SYS, ETAP_VM_PMAP_SYS_I);
827 #endif	/* NCPUS > 1 */
828 	simple_lock_init(&kernel_pmap->pmap_lock);
829 	simple_lock_init(&pmap_freelock);
830 	simple_lock_init(&sid_pid_lock);
831 
832 	kernel_pmap->pmap_refcnt = 1;
833 	kernel_pmap->pmap_space = HPPA_SID_KERNEL;
834 	kernel_pmap->pmap_pid = HPPA_PID_KERNEL;
835 
836 	/*
837 	 * Allocate various tables and structures.
838 	 */
839 	addr = hppa_round_page(*vstart);
840 	virtual_end = *vend;
841 
842 	/*
843 	 * Figure out how big the HPT must be, and align
844 	 * addr to what will be its beginning.  We don't
845 	 * waste the pages skipped for the alignment;
846 	 * they become struct pv_entry pages.
847 	 */
848 	pv_region = addr;
849 	mfctl(CR_HPTMASK, size);
850 	addr = (addr + size) & ~(size);
851 	pv_free_list = NULL;
852 	pmap_pv_add(pv_region, addr);
853 
854 	/* Allocate the HPT */
855 	for (hptp = (struct hpt_entry *)addr;
856 	     ((u_int)hptp - addr) <= size; hptp++) {
857 		hptp->hpt_valid   = 0;
858 		hptp->hpt_vpn     = 0;
859 		hptp->hpt_space   = -1;
860 		hptp->hpt_tlbpage = 0;
861 		hptp->hpt_tlbprot = 0;
862 		hptp->hpt_entry   = NULL;
863 	}
864 #ifdef PMAPDEBUG
865 	if (pmapdebug & PDB_INIT)
866 		printf("hpt_table: 0x%lx @ %p\n", size + 1, (caddr_t)addr);
867 #endif
868 	/* load cr25 with the address of the HPT table
869 	   NB: It sez CR_VTOP, but we (and the TLB handlers) know better ... */
870 	mtctl(addr, CR_VTOP);
871 	hpt_base = addr;
872 	hpt_mask = size;
873 	proc0.p_md.md_regs->tf_hptm = size;
874 	proc0.p_md.md_regs->tf_vtop = addr;
875 	addr += size + 1;
876 
877 	/* Allocate the struct pv_head array. */
878 	addr = ALIGN(addr);
879 	pv_head_tbl = (struct pv_head *) addr;
880 	memset(pv_head_tbl, 0, sizeof(*pv_head_tbl) * totalphysmem);
881 	addr = (vaddr_t) (pv_head_tbl + totalphysmem);
882 
883 	/* Allocate the page aliased bitmap. */
884 	addr = ALIGN(addr);
885 	page_aliased_bitmap = (u_int *) addr;
886 	addr = (vaddr_t) (&_PAGE_ALIASED_WORD(totalphysmem) + 1);
887 	memset(page_aliased_bitmap, 0, addr - (vaddr_t) page_aliased_bitmap);
888 
889 	/*
890 	 * Allocate the largest struct pv_entry region.   The
891 	 * 6 is a magic constant, chosen to allow on average
892 	 * all physical pages to have 6 simultaneous mappings
893 	 * without having to reclaim any struct pv_entry.
894 	 */
895 	pv_region = addr;
896 	addr += sizeof(struct pv_entry) * totalphysmem * 6;
897 	pmap_pv_add(pv_region, addr);
898 
899 	/*
900 	 * Allocate the steal region.  Because pmap_steal_memory
901 	 * must panic whenever an allocation cannot be fulfilled,
902 	 * we have to guess at the maximum amount of space that
903 	 * might be stolen.  Overestimating is not really a problem,
904 	 * as it only leads to lost virtual space, not lost physical
905 	 * pages.
906 	 */
907 	addr = hppa_round_page(addr);
908 	virtual_steal = addr;
909 	addr += totalphysmem * sizeof(struct vm_page);
910 	memset((caddr_t) virtual_steal, 0, addr - virtual_steal);
911 
912 	/*
913 	 * We now have a rough idea of where managed kernel virtual
914 	 * space will begin, and we can start mapping everything
915 	 * before that.
916 	 */
917 	addr = hppa_round_page(addr);
918 	*vstart = addr;
919 
920 	/*
921 	 * In general, the virtual space below the kernel text is
922 	 * left unmapped, to allow detection of NULL dereferences.
923 	 * However, these tmp_vpages are two virtual pages right
924 	 * before the kernel text that can be mapped for page copying
925 	 * and zeroing.
926 	 */
927 	tmp_vpages[1] = hppa_trunc_page((vaddr_t) &kernel_text) - PAGE_SIZE;
928 	tmp_vpages[0] = tmp_vpages[1] - PAGE_SIZE;
929 
930 	/*
931 	 * The kernel text, data, and bss must be direct-mapped,
932 	 * because the kernel often runs in physical mode, and
933 	 * anyways the loader loaded the kernel into physical
934 	 * memory exactly where it was linked.
935 	 *
936 	 * All memory already allocated after bss, either by
937 	 * our caller or by this function itself, must also be
938 	 * direct-mapped, because it's completely unmanaged
939 	 * and was allocated in physical mode.
940 	 *
941 	 * BTLB entries are used to do this direct mapping.
942 	 * BTLB entries have a minimum and maximum possible size,
943 	 * and MD code gives us these sizes in units of pages.
944 	 */
945 	btlb_entry_min = (vsize_t) hppa_btlb_size_min * PAGE_SIZE;
946 	btlb_entry_max = (vsize_t) hppa_btlb_size_max * PAGE_SIZE;
947 
948 	/*
949 	 * We begin by making BTLB entries for the kernel text.
950 	 * To keep things simple, we insist that the kernel text
951 	 * be aligned to the minimum BTLB entry size.
952 	 */
953 	if (((vaddr_t) &kernel_text) & (btlb_entry_min - 1))
954 		panic("kernel text not aligned to BTLB minimum size");
955 
956 	/*
957 	 * To try to conserve BTLB entries, take a hint from how
958 	 * the kernel was linked: take the kernel text start as
959 	 * our effective minimum BTLB entry size, assuming that
960 	 * the data segment was also aligned to that size.
961 	 *
962 	 * In practice, linking the kernel at 2MB, and aligning
963 	 * the data segment to a 2MB boundary, should control well
964 	 * how much of the BTLB the pmap uses.  However, this code
965 	 * should not rely on this 2MB magic number, nor should
966 	 * it rely on the data segment being aligned at all.  This
967 	 * is to allow (smaller) kernels (linked lower) to work fine.
968 	 */
969 	btlb_entry_min = (vaddr_t) &kernel_text;
970 	__asm __volatile (
971 		"	ldil L%%$global$, %0	\n"
972 		"	ldo R%%$global$(%0), %0	\n"
973 		: "=r" (kernel_data));
974 
975 	/*
976 	 * Now make BTLB entries to direct-map the kernel text
977 	 * read- and execute-only as much as possible.  Note that
978 	 * if the data segment isn't nicely aligned, the last
979 	 * BTLB entry for the kernel text may also cover some of
980 	 * the data segment, meaning it will have to allow writing.
981 	 */
982 	addr = (vaddr_t) &kernel_text;
983 	btlb_j = 0;
984 	while (addr < (vaddr_t) &etext) {
985 
986 		/* Set up the next BTLB entry. */
987 		KASSERT(btlb_j < BTLB_SET_SIZE);
988 		btlb_entry_start[btlb_j] = addr;
989 		btlb_entry_size[btlb_j] = btlb_entry_min;
990 		btlb_entry_vm_prot[btlb_j] = VM_PROT_READ | VM_PROT_EXECUTE;
991 		if (addr + btlb_entry_min > kernel_data)
992 			btlb_entry_vm_prot[btlb_j] |= VM_PROT_WRITE;
993 
994 		/* Coalesce BTLB entries whenever possible. */
995 		while (btlb_j > 0 &&
996 			btlb_entry_vm_prot[btlb_j] ==
997 			btlb_entry_vm_prot[btlb_j - 1] &&
998 			btlb_entry_size[btlb_j] ==
999 			btlb_entry_size[btlb_j - 1] &&
1000 			!(btlb_entry_start[btlb_j - 1] &
1001 			  ((btlb_entry_size[btlb_j - 1] << 1) - 1)) &&
1002 			(btlb_entry_size[btlb_j - 1] << 1) <=
1003 			btlb_entry_max)
1004 			btlb_entry_size[--btlb_j] <<= 1;
1005 
1006 		/* Move on. */
1007 		addr = btlb_entry_start[btlb_j] + btlb_entry_size[btlb_j];
1008 		btlb_j++;
1009 	}
1010 
1011 	/*
1012 	 * Now make BTLB entries to direct-map the kernel data,
1013 	 * bss, and all of the preallocated space read-write.
1014 	 *
1015 	 * Note that, unlike above, we're not concerned with
1016 	 * making these BTLB entries such that they finish as
1017 	 * close as possible to the end of the space we need
1018 	 * them to map.  Instead, to minimize the number of BTLB
1019 	 * entries we need, we make them as large as possible.
1020 	 * The only thing this wastes is kernel virtual space,
1021 	 * which is plentiful.
1022 	 */
1023 	while (addr < *vstart) {
1024 
1025 		/* Make the next BTLB entry. */
1026 		KASSERT(btlb_j < BTLB_SET_SIZE);
1027 		size = btlb_entry_min;
1028 		while ((addr + size) < *vstart &&
1029 			(size << 1) < btlb_entry_max &&
1030 			!(addr & ((size << 1) - 1)))
1031 			size <<= 1;
1032 		btlb_entry_start[btlb_j] = addr;
1033 		btlb_entry_size[btlb_j] = size;
1034 		btlb_entry_vm_prot[btlb_j] = VM_PROT_READ | VM_PROT_WRITE;
1035 
1036 		/* Move on. */
1037 		addr = btlb_entry_start[btlb_j] + btlb_entry_size[btlb_j];
1038 		btlb_j++;
1039 	}
1040 
1041 	/* Now insert all of the BTLB entries. */
1042 	for (btlb_i = 0; btlb_i < btlb_j; btlb_i++) {
1043 		btlb_entry_got = btlb_entry_size[btlb_i];
1044 		if (hppa_btlb_insert(kernel_pmap->pmap_space,
1045 				btlb_entry_start[btlb_i],
1046 				btlb_entry_start[btlb_i],
1047 				&btlb_entry_got,
1048 				kernel_pmap->pmap_pid |
1049 				pmap_prot(kernel_pmap,
1050 					btlb_entry_vm_prot[btlb_i])) < 0)
1051 			panic("pmap_bootstrap: cannot insert BTLB entry");
1052 		if (btlb_entry_got != btlb_entry_size[btlb_i])
1053 			panic("pmap_bootstrap: BTLB entry mapped wrong amount");
1054 	}
1055 
1056 	/*
1057 	 * We now know the exact beginning of managed kernel
1058 	 * virtual space.
1059 	 */
1060 	*vstart = btlb_entry_start[btlb_j - 1] + btlb_entry_size[btlb_j - 1];
1061 	virtual_start = *vstart;
1062 
1063 	/*
1064 	 * Finally, load physical pages into UVM.  There are
1065 	 * three segments of pages.
1066 	 */
1067 	physmem = 0;
1068 
1069 	/* The first segment runs from [resvmem..kernel_text). */
1070 	phys_start = resvmem;
1071 	phys_end = atop(hppa_trunc_page(&kernel_text));
1072 #ifdef DIAGNOSTIC
1073 	printf("phys segment: 0x%x 0x%x\n", (u_int)phys_start, (u_int)phys_end);
1074 #endif
1075 	if (phys_end > phys_start) {
1076 		uvm_page_physload(phys_start, phys_end,
1077 			phys_start, phys_end, VM_FREELIST_DEFAULT);
1078 		physmem += phys_end - phys_start;
1079 	}
1080 
1081 	/* The second segment runs from [etext..kernel_data). */
1082 	phys_start = atop(hppa_round_page((vaddr_t) &etext));
1083 	phys_end = atop(hppa_trunc_page(kernel_data));
1084 #ifdef DIAGNOSTIC
1085 	printf("phys segment: 0x%x 0x%x\n", (u_int)phys_start, (u_int)phys_end);
1086 #endif
1087 	if (phys_end > phys_start) {
1088 		uvm_page_physload(phys_start, phys_end,
1089 			phys_start, phys_end, VM_FREELIST_DEFAULT);
1090 		physmem += phys_end - phys_start;
1091 	}
1092 
1093 	/* The third segment runs from [virtual_steal..totalphysmem). */
1094 	phys_start = atop(virtual_steal);
1095 	phys_end = totalphysmem;
1096 #ifdef DIAGNOSTIC
1097 	printf("phys segment: 0x%x 0x%x\n", (u_int)phys_start, (u_int)phys_end);
1098 #endif
1099 	if (phys_end > phys_start) {
1100 		uvm_page_physload(phys_start, phys_end,
1101 			phys_start, phys_end, VM_FREELIST_DEFAULT);
1102 		physmem += phys_end - phys_start;
1103 	}
1104 }
1105 
1106 /*
1107  * pmap_steal_memory(size, startp, endp)
1108  *	steals memory block of size `size' from directly mapped
1109  *	segment (mapped behind the scenes).
1110  *	directly mapped cannot be grown dynamically once allocated.
1111  */
1112 vaddr_t
1113 pmap_steal_memory(size, startp, endp)
1114 	vsize_t size;
1115 	vaddr_t *startp;
1116 	vaddr_t *endp;
1117 {
1118 	vaddr_t va;
1119 	int lcv;
1120 
1121 	PMAP_PRINTF(PDB_STEAL, ("(%lx, %p, %p)\n", size, startp, endp));
1122 
1123 	/* Remind the caller of the start and end of virtual space. */
1124 	if (startp)
1125 		*startp = virtual_start;
1126 	if (endp)
1127 		*endp = virtual_end;
1128 
1129 	/* Round the allocation up to a page. */
1130 	size = hppa_round_page(size);
1131 
1132 	/* We must panic if we cannot steal the memory. */
1133 	if (size > virtual_start - virtual_steal)
1134 		panic("pmap_steal_memory: out of memory");
1135 
1136 	/* Steal the memory. */
1137 	va = virtual_steal;
1138 	virtual_steal += size;
1139 	PMAP_PRINTF(PDB_STEAL, (": steal %ld bytes @%x\n", size, (u_int)va));
1140 	for (lcv = 0; lcv < vm_nphysseg ; lcv++)
1141 		if (vm_physmem[lcv].start == atop(va)) {
1142 			vm_physmem[lcv].start = atop(virtual_steal);
1143 			vm_physmem[lcv].avail_start = atop(virtual_steal);
1144 			break;
1145 		}
1146 	if (lcv == vm_nphysseg)
1147 		panic("pmap_steal_memory inconsistency");
1148 
1149 	return va;
1150 }
1151 
1152 /*
1153  * How much virtual space does this kernel have?
1154  * (After mapping kernel text, data, etc.)
1155  */
1156 void
1157 pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
1158 {
1159 	*vstartp = virtual_start;
1160 	*vendp = virtual_end;
1161 }
1162 
1163 /*
1164  * Finishes the initialization of the pmap module.
1165  * This procedure is called from vm_mem_init() in vm/vm_init.c
1166  * to initialize any remaining data structures that the pmap module
1167  * needs to map virtual memory (VM is already ON).
1168  */
1169 void
1170 pmap_init()
1171 {
1172 	extern void gateway_page __P((void));
1173 
1174 	TAILQ_INIT(&pmap_freelist);
1175 	pid_counter = HPPA_PID_KERNEL + 2;
1176 
1177 	/*
1178 	 * map SysCall gateways page once for everybody
1179 	 * NB: we'll have to remap the phys memory
1180 	 *     if we have any at SYSCALLGATE address (;
1181 	 *
1182 	 * no spls since no interrupts
1183 	 */
1184 	pmap_pv_enter(pmap_kernel(), HPPA_SID_KERNEL, SYSCALLGATE,
1185 		      (paddr_t)&gateway_page,
1186 		      TLB_GATE_PROT | TLB_UNMANAGED | TLB_WIRED);
1187 
1188 	pmap_initialized = TRUE;
1189 }
1190 
1191 /*
1192  * Initialize a preallocated and zeroed pmap structure,
1193  * such as one in a vmspace structure.
1194  */
1195 static void pmap_pinit __P((pmap_t));
1196 static void
1197 pmap_pinit(pmap)
1198 	pmap_t pmap;
1199 {
1200 	register u_int pid;
1201 	int s;
1202 
1203 	PMAP_PRINTF(PDB_PMAP, ("(%p), pid=%x\n", pmap, pmap->pmap_pid));
1204 
1205 	if (!(pid = pmap->pmap_pid)) {
1206 
1207 		/*
1208 		 * Allocate space and protection IDs for the pmap.
1209 		 * If all are allocated, there is nothing we can do.
1210 		 */
1211 		s = splvm();
1212 		if (pid_counter < HPPA_MAX_PID) {
1213 			pid = pid_counter;
1214 			pid_counter += 2;
1215 		} else
1216 			pid = 0;
1217 		splx(s);
1218 
1219 		if (pid == 0)
1220 			panic ("no more pmap ids\n");
1221 
1222 		simple_lock_init(&pmap->pmap_lock);
1223 	}
1224 
1225 	s = splvm();
1226 	pmap->pmap_pid = pid;
1227 	pmap->pmap_space = (pmap->pmap_pid >> 1) - 1;
1228 	pmap->pmap_refcnt = 1;
1229 	pmap->pmap_stats.resident_count = 0;
1230 	pmap->pmap_stats.wired_count = 0;
1231 	splx(s);
1232 }
1233 
1234 /*
1235  * pmap_create()
1236  *
1237  * Create and return a physical map.
1238  * the map is an actual physical map, and may be referenced by the hardware.
1239  */
1240 pmap_t
1241 pmap_create()
1242 {
1243 	register pmap_t pmap;
1244 	int s;
1245 
1246 	PMAP_PRINTF(PDB_PMAP, ("()"));
1247 
1248 	/*
1249 	 * If there is a pmap in the pmap free list, reuse it.
1250 	 */
1251 	s = splvm();
1252 	if (pmap_nfree) {
1253 		pmap = pmap_freelist.tqh_first;
1254 		TAILQ_REMOVE(&pmap_freelist, pmap, pmap_list);
1255 		pmap_nfree--;
1256 		splx(s);
1257 	} else {
1258 		splx(s);
1259 		MALLOC(pmap, struct pmap *, sizeof(*pmap), M_VMMAP, M_NOWAIT);
1260 		if (pmap == NULL)
1261 			return NULL;
1262 		bzero(pmap, sizeof(*pmap));
1263 	}
1264 
1265 	pmap_pinit(pmap);
1266 
1267 	return(pmap);
1268 }
1269 
1270 /*
1271  * pmap_destroy(pmap)
1272  *	Gives up a reference to the specified pmap.  When the reference count
1273  *	reaches zero the pmap structure is added to the pmap free list.
1274  *	Should only be called if the map contains no valid mappings.
1275  */
1276 void
1277 pmap_destroy(pmap)
1278 	pmap_t pmap;
1279 {
1280 	int ref_count;
1281 	int s;
1282 
1283 	PMAP_PRINTF(PDB_PMAP, ("(%p)\n", pmap));
1284 
1285 	s = splvm();
1286 
1287 	ref_count = --pmap->pmap_refcnt;
1288 
1289 	if (ref_count < 0)
1290 		panic("pmap_destroy(): ref_count < 0");
1291 	if (!ref_count) {
1292 		KASSERT(pmap->pmap_stats.resident_count == 0);
1293 		KASSERT(pmap->pmap_stats.wired_count == 0);
1294 
1295 		/*
1296 		 * Add the pmap to the pmap free list
1297 		 * We cannot free() disposed pmaps because of
1298 		 * PID shortage of 2^16
1299 		 */
1300 		TAILQ_INSERT_HEAD(&pmap_freelist, pmap, pmap_list);
1301 		pmap_nfree++;
1302 	}
1303 	splx(s);
1304 }
1305 
1306 /*
1307  * pmap_activate(proc)
1308  *	Activates the vmspace for the given process.  This
1309  *	isn't necessarily the current process.
1310  */
1311 void
1312 pmap_activate(struct proc *p)
1313 {
1314 	pmap_t pmap = p->p_vmspace->vm_map.pmap;
1315 	pa_space_t space = pmap->pmap_space;
1316 	struct trapframe *tf = p->p_md.md_regs;
1317 
1318 	/* space is cached for the copy{in,out}'s pleasure */
1319 	p->p_addr->u_pcb.pcb_space = space;
1320 
1321 	/* Load all of the user's space registers. */
1322 	tf->tf_sr0 = tf->tf_sr1 = tf->tf_sr2 = tf->tf_sr3 =
1323 	tf->tf_sr4 = tf->tf_sr5 = tf->tf_sr6 = space;
1324 	tf->tf_iisq_head = tf->tf_iisq_tail = space;
1325 
1326 	/*
1327 	 * Load the protection registers.  NB that
1328 	 * if p *is* the current process, we set pidr2
1329 	 * to the new space immediately, so any copyins
1330 	 * or copyouts that happen before we return to
1331 	 * userspace work.
1332 	 */
1333 	tf->tf_pidr1 = tf->tf_pidr2 = pmap->pmap_pid;
1334 	if (p == curproc)
1335 		mtctl(pmap->pmap_pid, CR_PIDR2);
1336 }
1337 
1338 /*
1339  * pmap_enter(pmap, va, pa, prot, flags)
1340  *	Create a translation for the virtual address (va) to the physical
1341  *	address (pa) in the pmap with the protection requested. If the
1342  *	translation is wired then we can not allow a page fault to occur
1343  *	for this mapping.
1344  */
1345 int
1346 pmap_enter(pmap, va, pa, prot, flags)
1347 	pmap_t pmap;
1348 	vaddr_t va;
1349 	paddr_t pa;
1350 	vm_prot_t prot;
1351 	int flags;
1352 {
1353 	register struct pv_entry *pv;
1354 	u_int tlbpage, tlbprot;
1355 	pa_space_t space;
1356 	boolean_t waswired;
1357 	boolean_t wired = (flags & PMAP_WIRED) != 0;
1358 	int s;
1359 
1360 	/* Get a handle on the mapping we want to enter. */
1361 	space = pmap_sid(pmap, va);
1362 	va = hppa_trunc_page(va);
1363 	pa = hppa_trunc_page(pa);
1364 	tlbpage = tlbbtop(pa);
1365 	tlbprot = pmap_prot(pmap, prot) | pmap->pmap_pid;
1366 	if (wired)
1367 		tlbprot |= TLB_WIRED;
1368 
1369 #ifdef PMAPDEBUG
1370 	if (!pmap_initialized || (pmapdebug & PDB_ENTER))
1371 		PMAP_PRINTF(0, ("(%p, %p, %p, %x, %swired)\n",
1372 				pmap, (caddr_t)va, (caddr_t)pa,
1373 				prot, wired? "" : "un"));
1374 #endif
1375 
1376 	s = splvm();
1377 
1378 	if (!(pv = pmap_pv_find_va(space, va))) {
1379 		/*
1380 		 * Mapping for this virtual address doesn't exist.
1381 		 * Enter a new mapping.
1382 		 */
1383 		pv = pmap_pv_enter(pmap, space, va, pa, tlbprot);
1384 		pmap->pmap_stats.resident_count++;
1385 		waswired = FALSE;
1386 	} else {
1387 		KASSERT((pv->pv_tlbprot & TLB_UNMANAGED) == 0);
1388 		waswired = pv->pv_tlbprot & TLB_WIRED;
1389 
1390 		/* see if we are remapping the page to another PA */
1391 		if (pv->pv_tlbpage != tlbpage) {
1392 			PMAP_PRINTF(PDB_ENTER, (": moving pa %x -> %x\n",
1393 						pv->pv_tlbpage, tlbpage));
1394 			/* update tlbprot to avoid extra subsequent fault */
1395 			pmap_pv_remove(pv);
1396 			pv = pmap_pv_enter(pmap, space, va, pa, tlbprot);
1397 		} else {
1398 			/* We are just changing the protection.  */
1399 #ifdef PMAPDEBUG
1400 			if (pmapdebug & PDB_ENTER) {
1401 				char buffer1[64];
1402 				char buffer2[64];
1403 				bitmask_snprintf(pv->pv_tlbprot, TLB_BITS,
1404 						 buffer1, sizeof(buffer1));
1405 				bitmask_snprintf(tlbprot, TLB_BITS,
1406 						 buffer2, sizeof(buffer2));
1407 				printf("pmap_enter: changing %s->%s\n",
1408 				    buffer1, buffer2);
1409 			}
1410 #endif
1411 			pmap_pv_update(pv, TLB_AR_MASK|TLB_PID_MASK|TLB_WIRED,
1412 				       tlbprot);
1413 		}
1414 	}
1415 
1416 	/*
1417 	 * Adjust statistics
1418 	 */
1419 	if (wired && !waswired) {
1420 		pmap->pmap_stats.wired_count++;
1421 	} else if (!wired && waswired) {
1422 		pmap->pmap_stats.wired_count--;
1423 	}
1424 	splx(s);
1425 
1426 	return (0);
1427 }
1428 
1429 /*
1430  * pmap_remove(pmap, sva, eva)
1431  *	unmaps all virtual addresses v in the virtual address
1432  *	range determined by [sva, eva) and pmap.
1433  *	sva and eva must be on machine independent page boundaries and
1434  *	sva must be less than or equal to eva.
1435  */
1436 void
1437 pmap_remove(pmap, sva, eva)
1438 	register pmap_t pmap;
1439 	register vaddr_t sva;
1440 	register vaddr_t eva;
1441 {
1442 	register struct pv_entry *pv;
1443 	register pa_space_t space;
1444 	int s;
1445 
1446 	PMAP_PRINTF(PDB_REMOVE, ("(%p, %p, %p)\n",
1447 				 pmap, (caddr_t)sva, (caddr_t)eva));
1448 
1449 	sva = hppa_trunc_page(sva);
1450 	space = pmap_sid(pmap, sva);
1451 
1452 	s = splvm();
1453 
1454 	while (sva < eva) {
1455 		pv = pmap_pv_find_va(space, sva);
1456 		if (pv) {
1457 			KASSERT((pv->pv_tlbprot & TLB_UNMANAGED) == 0);
1458 			KASSERT(pmap->pmap_stats.resident_count > 0);
1459 			pmap->pmap_stats.resident_count--;
1460 			if (pv->pv_tlbprot & TLB_WIRED) {
1461 				KASSERT(pmap->pmap_stats.wired_count > 0);
1462 				pmap->pmap_stats.wired_count--;
1463 			}
1464 			pmap_pv_remove(pv);
1465 			PMAP_PRINTF(PDB_REMOVE, (": removed %p for 0x%x:%p\n",
1466 						 pv, space, (caddr_t)sva));
1467 		}
1468 		sva += PAGE_SIZE;
1469 	}
1470 
1471 	splx(s);
1472 }
1473 
1474 /*
1475  *	pmap_page_protect(pa, prot)
1476  *
1477  *	Lower the permission for all mappings to a given page.
1478  */
1479 void
1480 pmap_page_protect(pg, prot)
1481 	struct vm_page *pg;
1482 	vm_prot_t prot;
1483 {
1484 	register struct pv_entry *pv, *pv_next;
1485 	register pmap_t pmap;
1486 	register u_int tlbprot;
1487 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
1488 	int s;
1489 
1490 	PMAP_PRINTF(PDB_PROTECT, ("(%p, %x)\n", (caddr_t)pa, prot));
1491 
1492 	switch (prot) {
1493 	case VM_PROT_ALL:
1494 		return;
1495 	case VM_PROT_READ:
1496 	case VM_PROT_READ|VM_PROT_EXECUTE:
1497 		s = splvm();
1498 		for (pv = pmap_pv_find_pa(pa); pv; pv = pv->pv_next) {
1499 			/* Ignore unmanaged mappings. */
1500 			if (pv->pv_tlbprot & TLB_UNMANAGED)
1501 				continue;
1502 			/*
1503 			 * Compare new protection with old to see if
1504 			 * anything needs to be changed.
1505 			 */
1506 			tlbprot = pmap_prot(pv->pv_pmap, prot);
1507 			if ((pv->pv_tlbprot & TLB_AR_MASK) != tlbprot) {
1508 				pmap_pv_update(pv, TLB_AR_MASK, tlbprot);
1509 			}
1510 		}
1511 		splx(s);
1512 		break;
1513 	default:
1514 		s = splvm();
1515 		for (pv = pmap_pv_find_pa(pa); pv != NULL; pv = pv_next) {
1516 			pv_next = pv->pv_next;
1517 			/* Ignore unmanaged mappings. */
1518 			if (pv->pv_tlbprot & TLB_UNMANAGED)
1519 				continue;
1520 #ifdef PMAPDEBUG
1521 			if (pmapdebug & PDB_PROTECT) {
1522 				char buffer[64];
1523 				bitmask_snprintf(pv->pv_tlbprot, TLB_BITS,
1524 						 buffer, sizeof(buffer));
1525 				printf("pv={%p,%x:%x,%s,%x}->%p\n",
1526 				    pv->pv_pmap, pv->pv_space, pv->pv_va,
1527 				    buffer,
1528 				    tlbptob(pv->pv_tlbpage), pv->pv_hash);
1529 			}
1530 #endif
1531 			pmap = pv->pv_pmap;
1532 			if (pv->pv_tlbprot & TLB_WIRED) {
1533 				KASSERT(pmap->pmap_stats.wired_count > 0);
1534 				pmap->pmap_stats.wired_count--;
1535 			}
1536 			pmap_pv_remove(pv);
1537 			KASSERT(pmap->pmap_stats.resident_count > 0);
1538 			pmap->pmap_stats.resident_count--;
1539 		}
1540 		splx(s);
1541 		break;
1542 	}
1543 }
1544 
1545 /*
1546  * pmap_protect(pmap, s, e, prot)
1547  *	changes the protection on all virtual addresses v in the
1548  *	virtual address range determined by [s, e) and pmap to prot.
1549  *	s and e must be on machine independent page boundaries and
1550  *	s must be less than or equal to e.
1551  */
1552 void
1553 pmap_protect(pmap, sva, eva, prot)
1554 	pmap_t pmap;
1555 	vaddr_t sva;
1556 	vaddr_t eva;
1557 	vm_prot_t prot;
1558 {
1559 	register struct pv_entry *pv;
1560 	u_int tlbprot;
1561 	pa_space_t space;
1562 	int s;
1563 
1564 	PMAP_PRINTF(PDB_PROTECT, ("(%p, %p, %p, %x)\n",
1565 				 pmap, (caddr_t)sva, (caddr_t)eva, prot));
1566 
1567 	if (prot == VM_PROT_NONE) {
1568 		pmap_remove(pmap, sva, eva);
1569 		return;
1570 	}
1571 	if (prot & VM_PROT_WRITE)
1572 		return;
1573 
1574 	sva = hppa_trunc_page(sva);
1575 	space = pmap_sid(pmap, sva);
1576 	tlbprot = pmap_prot(pmap, prot);
1577 
1578 	s = splvm();
1579 	for(; sva < eva; sva += PAGE_SIZE) {
1580 		if((pv = pmap_pv_find_va(space, sva))) {
1581 			KASSERT((pv->pv_tlbprot & TLB_UNMANAGED) == 0);
1582 			/*
1583 			 * Compare new protection with old to see if
1584 			 * anything needs to be changed.
1585 			 */
1586 			if ((pv->pv_tlbprot & TLB_AR_MASK) != tlbprot) {
1587 				pmap_pv_update(pv, TLB_AR_MASK, tlbprot);
1588 			}
1589 		}
1590 	}
1591 	splx(s);
1592 }
1593 
1594 /*
1595  *	Routine:	pmap_unwire
1596  *	Function:	Change the wiring attribute for a map/virtual-address
1597  *			pair.
1598  *	In/out conditions:
1599  *			The mapping must already exist in the pmap.
1600  *
1601  * Change the wiring for a given virtual page. This routine currently is
1602  * only used to unwire pages and hence the mapping entry will exist.
1603  */
1604 void
1605 pmap_unwire(pmap, va)
1606 	pmap_t	pmap;
1607 	vaddr_t	va;
1608 {
1609 	struct pv_entry *pv;
1610 	int s;
1611 
1612 	va = hppa_trunc_page(va);
1613 	PMAP_PRINTF(PDB_WIRING, ("(%p, %p)\n", pmap, (caddr_t)va));
1614 
1615 	simple_lock(&pmap->pmap_lock);
1616 
1617 	s = splvm();
1618 	if ((pv = pmap_pv_find_va(pmap_sid(pmap, va), va)) == NULL)
1619 		panic("pmap_unwire: can't find mapping entry");
1620 
1621 	KASSERT((pv->pv_tlbprot & TLB_UNMANAGED) == 0);
1622 	if (pv->pv_tlbprot & TLB_WIRED) {
1623 		KASSERT(pmap->pmap_stats.wired_count > 0);
1624 		pv->pv_tlbprot &= ~TLB_WIRED;
1625 		pmap->pmap_stats.wired_count--;
1626 	}
1627 	splx(s);
1628 	simple_unlock(&pmap->pmap_lock);
1629 }
1630 
1631 /*
1632  * pmap_extract(pmap, va, pap)
1633  *	fills in the physical address corrsponding to the
1634  *	virtual address specified by pmap and va into the
1635  *	storage pointed to by pap and returns TRUE if the
1636  *	virtual address is mapped. returns FALSE in not mapped.
1637  */
1638 boolean_t
1639 pmap_extract(pmap, va, pap)
1640 	pmap_t pmap;
1641 	vaddr_t va;
1642 	paddr_t *pap;
1643 {
1644 	struct pv_entry *pv;
1645 	vaddr_t off;
1646 	int s;
1647 
1648 	off = va;
1649 	off -= (va = hppa_trunc_page(va));
1650 
1651 	s = splvm();
1652 	if ((pv = pmap_pv_find_va(pmap_sid(pmap, va), va))) {
1653 		if (pap != NULL)
1654 			*pap = tlbptob(pv->pv_tlbpage) + off;
1655 		PMAP_PRINTF(PDB_EXTRACT, ("(%p, %p) = %p\n",
1656 				pmap, (caddr_t)va,
1657 				(caddr_t)(tlbptob(pv->pv_tlbpage) + off)));
1658 	} else {
1659 		PMAP_PRINTF(PDB_EXTRACT, ("(%p, %p) unmapped\n",
1660 					 pmap, (caddr_t)va));
1661 	}
1662 	splx(s);
1663 	return (pv != NULL);
1664 }
1665 
1666 /*
1667  * pmap_zero_page(pa)
1668  *
1669  * Zeros the specified page.
1670  */
1671 void
1672 pmap_zero_page(pa)
1673 	register paddr_t pa;
1674 {
1675 	struct pv_entry *pv;
1676 	int s;
1677 
1678 	PMAP_PRINTF(PDB_ZERO, ("(%p)\n", (caddr_t)pa));
1679 
1680 	s = splvm(); /* XXX are we already that high? */
1681 
1682 	/* Map the physical page. */
1683 	pv = pmap_pv_enter(pmap_kernel(), HPPA_SID_KERNEL, tmp_vpages[1], pa,
1684 			TLB_AR_KRW | TLB_UNMANAGED | TLB_WIRED);
1685 
1686 	/* Zero it. */
1687 	memset((caddr_t)tmp_vpages[1], 0, PAGE_SIZE);
1688 
1689 	/* Unmap the physical page. */
1690 	pmap_pv_remove(pv);
1691 
1692 	splx(s);
1693 }
1694 
1695 /*
1696  * pmap_copy_page(src, dst)
1697  *
1698  * pmap_copy_page copies the src page to the destination page. If a mapping
1699  * can be found for the source, we use that virtual address. Otherwise, a
1700  * slower physical page copy must be done. The destination is always a
1701  * physical address sivnce there is usually no mapping for it.
1702  */
1703 void
1704 pmap_copy_page(spa, dpa)
1705 	paddr_t spa;
1706 	paddr_t dpa;
1707 {
1708 	struct pv_entry *spv, *dpv;
1709 	int s;
1710 
1711 	PMAP_PRINTF(PDB_COPY, ("(%p, %p)\n", (caddr_t)spa, (caddr_t)dpa));
1712 
1713 	s = splvm(); /* XXX are we already that high? */
1714 
1715 	/* Map the two pages. */
1716 	spv = pmap_pv_enter(pmap_kernel(), HPPA_SID_KERNEL, tmp_vpages[0], spa,
1717 			TLB_AR_KR | TLB_UNMANAGED | TLB_WIRED);
1718 	dpv = pmap_pv_enter(pmap_kernel(), HPPA_SID_KERNEL, tmp_vpages[1], dpa,
1719 			TLB_AR_KRW | TLB_UNMANAGED | TLB_WIRED);
1720 
1721 	/* Do the copy. */
1722 	memcpy((caddr_t)tmp_vpages[1], (const caddr_t)tmp_vpages[0], PAGE_SIZE);
1723 
1724 	/* Unmap the pages. */
1725 	pmap_pv_remove(spv);
1726 	pmap_pv_remove(dpv);
1727 
1728 	splx(s);
1729 }
1730 
1731 /*
1732  * Given a PA and a bit, this tests and clears that bit in
1733  * the modref information for the PA.
1734  */
1735 static __inline boolean_t pmap_clear_bit __P((paddr_t, u_int));
1736 static __inline boolean_t
1737 pmap_clear_bit(paddr_t pa, u_int tlbprot_bit)
1738 {
1739 	int table_off;
1740 	struct pv_head *hpv;
1741 	u_int pv_head_bit;
1742 	boolean_t ret;
1743 	int s;
1744 
1745 	table_off = pmap_table_find_pa(pa);
1746 	KASSERT(table_off >= 0);
1747 	hpv = pv_head_tbl + table_off;
1748 	pv_head_bit = (tlbprot_bit == TLB_REF ? PV_HEAD_REF : PV_HEAD_DIRTY);
1749 	s = splvm();
1750 	_pmap_pv_update(pa, NULL, tlbprot_bit, 0);
1751 	ret = hpv->pv_head_writable_dirty_ref & pv_head_bit;
1752 	hpv->pv_head_writable_dirty_ref &= ~pv_head_bit;
1753 	splx(s);
1754 	return ret;
1755 }
1756 
1757 /*
1758  * Given a PA and a bit, this tests that bit in the modref
1759  * information for the PA.
1760  */
1761 static __inline boolean_t pmap_test_bit __P((paddr_t, u_int));
1762 static __inline boolean_t
1763 pmap_test_bit(paddr_t pa, u_int tlbprot_bit)
1764 {
1765 	int table_off;
1766 	struct pv_head *hpv;
1767 	u_int pv_head_bit;
1768 	struct pv_entry *pv;
1769 	boolean_t ret;
1770 	int s;
1771 
1772 	table_off = pmap_table_find_pa(pa);
1773 	KASSERT(table_off >= 0);
1774 	hpv = pv_head_tbl + table_off;
1775 	pv_head_bit = (tlbprot_bit == TLB_REF ? PV_HEAD_REF : PV_HEAD_DIRTY);
1776 	s = splvm();
1777 	ret = (hpv->pv_head_writable_dirty_ref & pv_head_bit) != 0;
1778 	if (!ret) {
1779 		for (pv = hpv->pv_head_pvs;
1780 		     pv != NULL;
1781 		     pv = pv->pv_next) {
1782 			if ((pv->pv_tlbprot & (TLB_UNMANAGED | tlbprot_bit)) ==
1783 			    tlbprot_bit) {
1784 				hpv->pv_head_writable_dirty_ref |= pv_head_bit;
1785 				ret = TRUE;
1786 				break;
1787 			}
1788 		}
1789 	}
1790 	splx(s);
1791 	return ret;
1792 }
1793 
1794 /*
1795  * pmap_clear_modify(pa)
1796  *	clears the hardware modified ("dirty") bit for one
1797  *	machine independant page starting at the given
1798  *	physical address.  phys must be aligned on a machine
1799  *	independant page boundary.
1800  */
1801 boolean_t
1802 pmap_clear_modify(pg)
1803 	struct vm_page *pg;
1804 {
1805 	register paddr_t pa = VM_PAGE_TO_PHYS(pg);
1806 	boolean_t ret = pmap_clear_bit(pa, TLB_DIRTY);
1807 	PMAP_PRINTF(PDB_BITS, ("(%p) = %d\n", (caddr_t)pa, ret));
1808 	return ret;
1809 }
1810 
1811 /*
1812  * pmap_is_modified(pa)
1813  *	returns TRUE if the given physical page has been modified
1814  *	since the last call to pmap_clear_modify().
1815  */
1816 boolean_t
1817 pmap_is_modified(pg)
1818 	struct vm_page *pg;
1819 {
1820 	register paddr_t pa = VM_PAGE_TO_PHYS(pg);
1821 	boolean_t ret = pmap_test_bit(pa, TLB_DIRTY);
1822 	PMAP_PRINTF(PDB_BITS, ("(%p) = %d\n", (caddr_t)pa, ret));
1823 	return ret;
1824 }
1825 
1826 /*
1827  * pmap_clear_reference(pa)
1828  *	clears the hardware referenced bit in the given machine
1829  *	independant physical page.
1830  *
1831  *	Currently, we treat a TLB miss as a reference; i.e. to clear
1832  *	the reference bit we flush all mappings for pa from the TLBs.
1833  */
1834 boolean_t
1835 pmap_clear_reference(pg)
1836 	struct vm_page *pg;
1837 {
1838 	register paddr_t pa = VM_PAGE_TO_PHYS(pg);
1839 	boolean_t ret = pmap_clear_bit(pa, TLB_REF);
1840 	PMAP_PRINTF(PDB_BITS, ("(%p) = %d\n", (caddr_t)pa, ret));
1841 	return ret;
1842 }
1843 
1844 /*
1845  * pmap_is_referenced(pa)
1846  *	returns TRUE if the given physical page has been referenced
1847  *	since the last call to pmap_clear_reference().
1848  */
1849 boolean_t
1850 pmap_is_referenced(pg)
1851 	struct vm_page *pg;
1852 {
1853 	register paddr_t pa = VM_PAGE_TO_PHYS(pg);
1854 	boolean_t ret = pmap_test_bit(pa, TLB_REF);
1855 	PMAP_PRINTF(PDB_BITS, ("(%p) = %d\n", (caddr_t)pa, ret));
1856 	return ret;
1857 }
1858 
1859 void
1860 pmap_kenter_pa(va, pa, prot)
1861 	vaddr_t va;
1862 	paddr_t pa;
1863 	vm_prot_t prot;
1864 {
1865 	int s;
1866 #ifdef PMAPDEBUG
1867 	int opmapdebug = pmapdebug;
1868 
1869 	/*
1870 	 * If we're being told to map page zero, we can't
1871 	 * call printf() at all, because doing so would
1872 	 * lead to an infinite recursion on this call.
1873 	 * (printf requires page zero to be mapped).
1874 	 */
1875 	if (va == 0)
1876 		pmapdebug = 0;
1877 #endif /* PMAPDEBUG */
1878 
1879 	PMAP_PRINTF(PDB_KENTER, ("(%p, %p, %x)\n",
1880 				 (caddr_t)va, (caddr_t)pa, prot));
1881 	va = hppa_trunc_page(va);
1882 	s = splvm();
1883 	KASSERT(pmap_pv_find_va(HPPA_SID_KERNEL, va) == NULL);
1884 	pmap_pv_enter(pmap_kernel(), HPPA_SID_KERNEL, va, pa,
1885 		      pmap_prot(pmap_kernel(), prot) |
1886 		      TLB_WIRED | TLB_UNMANAGED);
1887 	splx(s);
1888 #ifdef PMAPDEBUG
1889 	pmapdebug = opmapdebug;
1890 #endif /* PMAPDEBUG */
1891 }
1892 
1893 void
1894 pmap_kremove(va, size)
1895 	vaddr_t va;
1896 	vsize_t size;
1897 {
1898 	register struct pv_entry *pv;
1899 	int s;
1900 #ifdef PMAPDEBUG
1901 	int opmapdebug = pmapdebug;
1902 
1903 	/*
1904 	 * If we're being told to unmap page zero, we can't
1905 	 * call printf() at all, because doing so would
1906 	 * lead to an infinite recursion on this call.
1907 	 * (printf requires page zero to be mapped).
1908 	 */
1909 	if (va == 0)
1910 		pmapdebug = 0;
1911 #endif /* PMAPDEBUG */
1912 
1913 	PMAP_PRINTF(PDB_KENTER, ("(%p, %x)\n",
1914 				 (caddr_t)va, (u_int)size));
1915 
1916 	size += va;
1917 	va = hppa_trunc_page(va);
1918 	size -= va;
1919 	s = splvm();
1920 	for (size = hppa_round_page(size); size;
1921 	    size -= PAGE_SIZE, va += PAGE_SIZE) {
1922 		pv = pmap_pv_find_va(HPPA_SID_KERNEL, va);
1923 		if (pv) {
1924 			KASSERT((pv->pv_tlbprot & TLB_UNMANAGED) != 0);
1925 			pmap_pv_remove(pv);
1926 		} else {
1927 			PMAP_PRINTF(PDB_REMOVE, (": no pv for %p\n",
1928 						 (caddr_t)va));
1929 		}
1930 	}
1931 	splx(s);
1932 #ifdef PMAPDEBUG
1933 	pmapdebug = opmapdebug;
1934 #endif /* PMAPDEBUG */
1935 }
1936 
1937 /*
1938  * pmap_redzone(sva, eva, create)
1939  *	creates or removes a red zone in already mapped and wired memory,
1940  *	from [sva, eva) in the kernel map.
1941  */
1942 void
1943 pmap_redzone(vaddr_t sva, vaddr_t eva, int create)
1944 {
1945 	vaddr_t va;
1946 	struct pv_entry *pv;
1947 	u_int tlbprot;
1948 	int s;
1949 
1950 	sva = hppa_trunc_page(sva);
1951 	tlbprot = (create ? TLB_AR_NA : TLB_AR_KRW);
1952 	s = splvm();
1953 	for(va = sva; va < eva; va += PAGE_SIZE) {
1954 		pv = pmap_pv_find_va(HPPA_SID_KERNEL, va);
1955 		KASSERT(pv != NULL);
1956 		/*
1957 		 * Compare new protection with old to see if
1958 		 * anything needs to be changed.
1959 		 */
1960 		if ((pv->pv_tlbprot & TLB_AR_MASK) != tlbprot)
1961 			pmap_pv_update(pv, TLB_AR_MASK, tlbprot);
1962 	}
1963 	splx(s);
1964 }
1965 
1966 #if defined(PMAPDEBUG) && defined(DDB)
1967 #include <ddb/db_output.h>
1968 /*
1969  * prints whole va->pa (aka HPT or HVT)
1970  */
1971 void
1972 pmap_hptdump()
1973 {
1974 	register struct hpt_entry *hpt, *ehpt;
1975 	register struct pv_entry *pv;
1976 
1977 	mfctl(CR_HPTMASK, ehpt);
1978 	mfctl(CR_VTOP, hpt);
1979 	ehpt = (struct hpt_entry *)((int)hpt + (int)ehpt + 1);
1980 	db_printf("HPT dump %p-%p:\n", hpt, ehpt);
1981 	for (; hpt < ehpt; hpt++)
1982 		if (hpt->hpt_valid || hpt->hpt_entry) {
1983 			db_printf("hpt@%p: %x{%sv=%x:%x},%b,%x\n",
1984 			    hpt, *(int *)hpt, (hpt->hpt_valid?"ok,":""),
1985 			    hpt->hpt_space, hpt->hpt_vpn << 9,
1986 			    hpt->hpt_tlbprot, TLB_BITS,
1987 			    tlbptob(hpt->hpt_tlbpage));
1988 			for (pv = hpt->hpt_entry; pv; pv = pv->pv_hash)
1989 				db_printf("    pv={%p,%x:%x,%b,%x}->%p\n",
1990 				    pv->pv_pmap, pv->pv_space, pv->pv_va,
1991 				    pv->pv_tlbprot, TLB_BITS,
1992 				    tlbptob(pv->pv_tlbpage), pv->pv_hash);
1993 		}
1994 }
1995 #endif
1996