xref: /original-bsd/sys/hp300/hp300/pmap.c (revision 736e6f7a)
11537129bSmckusick /*
24975c9eaSbostic  * Copyright (c) 1991, 1993
34975c9eaSbostic  *	The Regents of the University of California.  All rights reserved.
41537129bSmckusick  *
51537129bSmckusick  * This code is derived from software contributed to Berkeley by
65bbbbabbSmckusick  * the Systems Programming Group of the University of Utah Computer
75bbbbabbSmckusick  * Science Department.
81537129bSmckusick  *
95bbbbabbSmckusick  * %sccs.include.redist.c%
101537129bSmckusick  *
11*736e6f7aSmckusick  *	@(#)pmap.c	8.7 (Berkeley) 05/17/95
121537129bSmckusick  */
131537129bSmckusick 
141537129bSmckusick /*
151537129bSmckusick  * HP9000/300 series physical map management code.
1624eb1037Shibler  *
17b0b4aa9cShibler  * Supports:
18b0b4aa9cShibler  *	68020 with HP MMU	models 320, 350
19b0b4aa9cShibler  *	68020 with 68551 MMU	models 318, 319, 330 (all untested)
20b0b4aa9cShibler  *	68030 with on-chip MMU	models 340, 360, 370, 345, 375, 400
21b0b4aa9cShibler  *	68040 with on-chip MMU	models 380, 425, 433
2224eb1037Shibler  *
2324eb1037Shibler  * Notes:
241537129bSmckusick  *	Don't even pay lip service to multiprocessor support.
259c60e975Shibler  *
2624eb1037Shibler  *	We assume TLB entries don't have process tags (except for the
2724eb1037Shibler  *	supervisor/user distinction) so we only invalidate TLB entries
2824eb1037Shibler  *	when changing mappings for the current (or kernel) pmap.  This is
2924eb1037Shibler  *	technically not true for the 68551 but we flush the TLB on every
3024eb1037Shibler  *	context switch, so it effectively winds up that way.
3124eb1037Shibler  *
3224eb1037Shibler  *	Bitwise and/or operations are significantly faster than bitfield
3324eb1037Shibler  *	references so we use them when accessing STE/PTEs in the pmap_pte_*
3424eb1037Shibler  *	macros.  Note also that the two are not always equivalent; e.g.:
3524eb1037Shibler  *		(*(int *)pte & PG_PROT) [4] != pte->pg_prot [1]
3624eb1037Shibler  *	and a couple of routines that deal with protection and wiring take
3724eb1037Shibler  *	some shortcuts that assume the and/or definitions.
3824eb1037Shibler  *
3924eb1037Shibler  *	This implementation will only work for PAGE_SIZE == NBPG
4024eb1037Shibler  *	(i.e. 4096 bytes).
411537129bSmckusick  */
421537129bSmckusick 
431537129bSmckusick /*
441537129bSmckusick  *	Manages physical address maps.
451537129bSmckusick  *
461537129bSmckusick  *	In addition to hardware address maps, this
471537129bSmckusick  *	module is called upon to provide software-use-only
481537129bSmckusick  *	maps which may or may not be stored in the same
491537129bSmckusick  *	form as hardware maps.  These pseudo-maps are
501537129bSmckusick  *	used to store intermediate results from copy
511537129bSmckusick  *	operations to and from address spaces.
521537129bSmckusick  *
531537129bSmckusick  *	Since the information managed by this module is
541537129bSmckusick  *	also stored by the logical address mapping module,
551537129bSmckusick  *	this module may throw away valid virtual-to-physical
561537129bSmckusick  *	mappings at almost any time.  However, invalidations
571537129bSmckusick  *	of virtual-to-physical mappings must be done as
581537129bSmckusick  *	requested.
591537129bSmckusick  *
601537129bSmckusick  *	In order to cope with hardware architectures which
611537129bSmckusick  *	make virtual-to-physical map invalidates expensive,
621537129bSmckusick  *	this module may delay invalidate or reduced protection
631537129bSmckusick  *	operations until such time as they are actually
641537129bSmckusick  *	necessary.  This module is given full information as
651537129bSmckusick  *	to which processors are currently using which maps,
661537129bSmckusick  *	and to when physical maps must be made correct.
671537129bSmckusick  */
681537129bSmckusick 
694e6f2c04Sbostic #include <sys/param.h>
704e6f2c04Sbostic #include <sys/systm.h>
714e6f2c04Sbostic #include <sys/proc.h>
724e6f2c04Sbostic #include <sys/malloc.h>
734e6f2c04Sbostic #include <sys/user.h>
741537129bSmckusick 
754e6f2c04Sbostic #include <hp300/hp300/pte.h>
761537129bSmckusick 
774e6f2c04Sbostic #include <vm/vm.h>
784e6f2c04Sbostic #include <vm/vm_kern.h>
794e6f2c04Sbostic #include <vm/vm_page.h>
80d45eccefSkarels 
814e6f2c04Sbostic #include <machine/cpu.h>
821537129bSmckusick 
8324eb1037Shibler #ifdef PMAPSTATS
841537129bSmckusick struct {
851537129bSmckusick 	int collectscans;
861537129bSmckusick 	int collectpages;
871537129bSmckusick 	int kpttotal;
881537129bSmckusick 	int kptinuse;
891537129bSmckusick 	int kptmaxuse;
901537129bSmckusick } kpt_stats;
911537129bSmckusick struct {
921537129bSmckusick 	int kernel;	/* entering kernel mapping */
931537129bSmckusick 	int user;	/* entering user mapping */
941537129bSmckusick 	int ptpneeded;	/* needed to allocate a PT page */
9524eb1037Shibler 	int nochange;	/* no change at all */
961537129bSmckusick 	int pwchange;	/* no mapping change, just wiring or protection */
971537129bSmckusick 	int wchange;	/* no mapping change, just wiring */
9824eb1037Shibler 	int pchange;	/* no mapping change, just protection */
991537129bSmckusick 	int mchange;	/* was mapped but mapping to different page */
1001537129bSmckusick 	int managed;	/* a managed page */
1011537129bSmckusick 	int firstpv;	/* first mapping for this PA */
1021537129bSmckusick 	int secondpv;	/* second mapping for this PA */
1031537129bSmckusick 	int ci;		/* cache inhibited */
1041537129bSmckusick 	int unmanaged;	/* not a managed page */
1051537129bSmckusick 	int flushes;	/* cache flushes */
1061537129bSmckusick } enter_stats;
1071537129bSmckusick struct {
1081537129bSmckusick 	int calls;
1091537129bSmckusick 	int removes;
1101537129bSmckusick 	int pvfirst;
1111537129bSmckusick 	int pvsearch;
1121537129bSmckusick 	int ptinvalid;
1131537129bSmckusick 	int uflushes;
1141537129bSmckusick 	int sflushes;
1151537129bSmckusick } remove_stats;
11614b153a2Shibler struct {
11714b153a2Shibler 	int calls;
11824eb1037Shibler 	int changed;
11914b153a2Shibler 	int alreadyro;
12014b153a2Shibler 	int alreadyrw;
12114b153a2Shibler } protect_stats;
12224eb1037Shibler struct chgstats {
12324eb1037Shibler 	int setcalls;
12424eb1037Shibler 	int sethits;
12524eb1037Shibler 	int setmiss;
12624eb1037Shibler 	int clrcalls;
12724eb1037Shibler 	int clrhits;
12824eb1037Shibler 	int clrmiss;
12924eb1037Shibler } changebit_stats[16];
13024eb1037Shibler #endif
1311537129bSmckusick 
13224eb1037Shibler #ifdef DEBUG
1331537129bSmckusick int debugmap = 0;
1341537129bSmckusick int pmapdebug = 0x2000;
1351537129bSmckusick #define PDB_FOLLOW	0x0001
1361537129bSmckusick #define PDB_INIT	0x0002
1371537129bSmckusick #define PDB_ENTER	0x0004
1381537129bSmckusick #define PDB_REMOVE	0x0008
1391537129bSmckusick #define PDB_CREATE	0x0010
1401537129bSmckusick #define PDB_PTPAGE	0x0020
1411537129bSmckusick #define PDB_CACHE	0x0040
1421537129bSmckusick #define PDB_BITS	0x0080
1431537129bSmckusick #define PDB_COLLECT	0x0100
1441537129bSmckusick #define PDB_PROTECT	0x0200
1451537129bSmckusick #define PDB_SEGTAB	0x0400
14624eb1037Shibler #define PDB_MULTIMAP	0x0800
1471537129bSmckusick #define PDB_PARANOIA	0x2000
1481537129bSmckusick #define PDB_WIRING	0x4000
1491537129bSmckusick #define PDB_PVDUMP	0x8000
1501537129bSmckusick 
15124eb1037Shibler #ifdef HAVEVAC
1521537129bSmckusick int pmapvacflush = 0;
1531537129bSmckusick #define	PVF_ENTER	0x01
1541537129bSmckusick #define	PVF_REMOVE	0x02
1551537129bSmckusick #define	PVF_PROTECT	0x04
1561537129bSmckusick #define	PVF_TOTAL	0x80
15724eb1037Shibler #endif
1589c60e975Shibler 
15914b153a2Shibler #if defined(HP380)
16014b153a2Shibler int dowriteback = 1;	/* 68040: enable writeback caching */
16114b153a2Shibler int dokwriteback = 1;	/* 68040: enable writeback caching of kernel AS */
16214b153a2Shibler #endif
16314b153a2Shibler 
1649c60e975Shibler extern vm_offset_t pager_sva, pager_eva;
1651537129bSmckusick #endif
1661537129bSmckusick 
1671537129bSmckusick /*
1681537129bSmckusick  * Get STEs and PTEs for user/kernel address space
1691537129bSmckusick  */
17014b153a2Shibler #if defined(HP380)
17114b153a2Shibler #define	pmap_ste1(m, v)	\
17214b153a2Shibler 	(&((m)->pm_stab[(vm_offset_t)(v) >> SG4_SHIFT1]))
17314b153a2Shibler /* XXX assumes physically contiguous ST pages (if more than one) */
17414b153a2Shibler #define pmap_ste2(m, v) \
17514b153a2Shibler 	(&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m, v) & SG4_ADDR1) \
17614b153a2Shibler 			- (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)]))
17724eb1037Shibler #define	pmap_ste(m, v)	\
17824eb1037Shibler 	(&((m)->pm_stab[(vm_offset_t)(v) \
17924eb1037Shibler 			>> (mmutype == MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)]))
18014b153a2Shibler #define pmap_ste_v(m, v) \
18114b153a2Shibler 	(mmutype == MMU_68040 \
18224eb1037Shibler 	 ? ((*(int *)pmap_ste1(m, v) & SG_V) && \
18324eb1037Shibler 	    (*(int *)pmap_ste2(m, v) & SG_V)) \
18424eb1037Shibler 	 : (*(int *)pmap_ste(m, v) & SG_V))
18514b153a2Shibler #else
18624eb1037Shibler #define	pmap_ste(m, v)	 (&((m)->pm_stab[(vm_offset_t)(v) >> SG_ISHIFT]))
18724eb1037Shibler #define pmap_ste_v(m, v) (*(int *)pmap_ste(m, v) & SG_V)
18814b153a2Shibler #endif
1891537129bSmckusick 
19024eb1037Shibler #define pmap_pte(m, v)	(&((m)->pm_ptab[(vm_offset_t)(v) >> PG_SHIFT]))
19114b153a2Shibler #define pmap_pte_pa(pte)	(*(int *)(pte) & PG_FRAME)
19224eb1037Shibler #define pmap_pte_w(pte)		(*(int *)(pte) & PG_W)
19324eb1037Shibler #define pmap_pte_ci(pte)	(*(int *)(pte) & PG_CI)
19424eb1037Shibler #define pmap_pte_m(pte)		(*(int *)(pte) & PG_M)
19524eb1037Shibler #define pmap_pte_u(pte)		(*(int *)(pte) & PG_U)
19624eb1037Shibler #define pmap_pte_prot(pte)	(*(int *)(pte) & PG_PROT)
19724eb1037Shibler #define pmap_pte_v(pte)		(*(int *)(pte) & PG_V)
19824eb1037Shibler 
19924eb1037Shibler #define pmap_pte_set_w(pte, v) \
20024eb1037Shibler 	if (v) *(int *)(pte) |= PG_W; else *(int *)(pte) &= ~PG_W
20124eb1037Shibler #define pmap_pte_set_prot(pte, v) \
20224eb1037Shibler 	if (v) *(int *)(pte) |= PG_PROT; else *(int *)(pte) &= ~PG_PROT
20324eb1037Shibler #define pmap_pte_w_chg(pte, nw)		((nw) ^ pmap_pte_w(pte))
20424eb1037Shibler #define pmap_pte_prot_chg(pte, np)	((np) ^ pmap_pte_prot(pte))
2051537129bSmckusick 
2061537129bSmckusick /*
2071537129bSmckusick  * Given a map and a machine independent protection code,
20824eb1037Shibler  * convert to an hp300 protection code.
2091537129bSmckusick  */
2101537129bSmckusick #define pte_prot(m, p)	(protection_codes[p])
2111537129bSmckusick int	protection_codes[8];
2121537129bSmckusick 
2131537129bSmckusick /*
2141537129bSmckusick  * Kernel page table page management.
2151537129bSmckusick  */
2161537129bSmckusick struct kpt_page {
2171537129bSmckusick 	struct kpt_page *kpt_next;	/* link on either used or free list */
2181537129bSmckusick 	vm_offset_t	kpt_va;		/* always valid kernel VA */
2191537129bSmckusick 	vm_offset_t	kpt_pa;		/* PA of this page (for speed) */
2201537129bSmckusick };
2211537129bSmckusick struct kpt_page *kpt_free_list, *kpt_used_list;
2221537129bSmckusick struct kpt_page *kpt_pages;
2231537129bSmckusick 
2241537129bSmckusick /*
2251537129bSmckusick  * Kernel segment/page table and page table map.
2261537129bSmckusick  * The page table map gives us a level of indirection we need to dynamically
2271537129bSmckusick  * expand the page table.  It is essentially a copy of the segment table
2281537129bSmckusick  * with PTEs instead of STEs.  All are initialized in locore at boot time.
2291537129bSmckusick  * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs.
2301537129bSmckusick  * Segtabzero is an empty segment table which all processes share til they
2311537129bSmckusick  * reference something.
2321537129bSmckusick  */
2331537129bSmckusick st_entry_t	*Sysseg;
2341537129bSmckusick pt_entry_t	*Sysmap, *Sysptmap;
23514b153a2Shibler st_entry_t	*Segtabzero, *Segtabzeropa;
2361537129bSmckusick vm_size_t	Sysptsize = VM_KERNEL_PT_PAGES;
2371537129bSmckusick 
2381537129bSmckusick struct pmap	kernel_pmap_store;
23922ac56c6Shibler vm_map_t	st_map, pt_map;
2401537129bSmckusick 
2411537129bSmckusick vm_offset_t    	avail_start;	/* PA of first available physical page */
2421537129bSmckusick vm_offset_t	avail_end;	/* PA of last available physical page */
2431537129bSmckusick vm_size_t	mem_size;	/* memory size in bytes */
2441537129bSmckusick vm_offset_t	virtual_avail;  /* VA of first avail page (after kernel bss)*/
2451537129bSmckusick vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
2461537129bSmckusick vm_offset_t	vm_first_phys;	/* PA of first managed page */
2471537129bSmckusick vm_offset_t	vm_last_phys;	/* PA just past last managed page */
2481537129bSmckusick boolean_t	pmap_initialized = FALSE;	/* Has pmap_init completed? */
2491537129bSmckusick char		*pmap_attributes;	/* reference and modify bits */
25024eb1037Shibler #ifdef HAVEVAC
25124eb1037Shibler int		pmap_aliasmask;	/* seperation at which VA aliasing ok */
25224eb1037Shibler #endif
25314b153a2Shibler #if defined(HP380)
25424eb1037Shibler int		protostfree;	/* prototype (default) free ST map */
25514b153a2Shibler #endif
2561537129bSmckusick 
25724eb1037Shibler /*
25824eb1037Shibler  * Internal routines
25924eb1037Shibler  */
26024eb1037Shibler void pmap_remove_mapping __P((pmap_t, vm_offset_t, pt_entry_t *, int));
26124eb1037Shibler boolean_t pmap_testbit	__P((vm_offset_t, int));
26224eb1037Shibler void pmap_changebit	__P((vm_offset_t, int, boolean_t));
26324eb1037Shibler void pmap_enter_ptpage	__P((pmap_t, vm_offset_t));
26424eb1037Shibler #ifdef DEBUG
26524eb1037Shibler void pmap_pvdump	__P((vm_offset_t));
26624eb1037Shibler void pmap_check_wiring	__P((char *, vm_offset_t));
26724eb1037Shibler #endif
26824eb1037Shibler 
26924eb1037Shibler /* pmap_remove_mapping flags */
27024eb1037Shibler #define	PRM_TFLUSH	1
27124eb1037Shibler #define	PRM_CFLUSH	2
2721537129bSmckusick 
2731537129bSmckusick /*
274a076b013Swilliam  * Bootstrap memory allocator. This function allows for early dynamic
275a076b013Swilliam  * memory allocation until the virtual memory system has been bootstrapped.
276a076b013Swilliam  * After that point, either kmem_alloc or malloc should be used. This
277a076b013Swilliam  * function works by stealing pages from the (to be) managed page pool,
278a076b013Swilliam  * stealing virtual address space, then mapping the pages and zeroing them.
279a076b013Swilliam  *
280a076b013Swilliam  * It should be used from pmap_bootstrap till vm_page_startup, afterwards
281a076b013Swilliam  * it cannot be used, and will generate a panic if tried. Note that this
282a076b013Swilliam  * memory will never be freed, and in essence it is wired down.
283a076b013Swilliam  */
284a076b013Swilliam void *
pmap_bootstrap_alloc(size)285a4878298Sbostic pmap_bootstrap_alloc(size)
286a4878298Sbostic 	int size;
287a4878298Sbostic {
288a076b013Swilliam 	extern boolean_t vm_page_startup_initialized;
289a4878298Sbostic 	vm_offset_t val;
290a076b013Swilliam 
291a076b013Swilliam 	if (vm_page_startup_initialized)
292a076b013Swilliam 		panic("pmap_bootstrap_alloc: called after startup initialized");
293a076b013Swilliam 	size = round_page(size);
294a076b013Swilliam 	val = virtual_avail;
295a076b013Swilliam 
296a076b013Swilliam 	virtual_avail = pmap_map(virtual_avail, avail_start,
297a076b013Swilliam 		avail_start + size, VM_PROT_READ|VM_PROT_WRITE);
298a076b013Swilliam 	avail_start += size;
299a076b013Swilliam 
300a076b013Swilliam 	blkclr ((caddr_t) val, size);
301a076b013Swilliam 	return ((void *) val);
302a076b013Swilliam }
303a076b013Swilliam 
304a076b013Swilliam /*
3051537129bSmckusick  *	Initialize the pmap module.
3061537129bSmckusick  *	Called by vm_init, to initialize any structures that the pmap
3071537129bSmckusick  *	system needs to map virtual memory.
3081537129bSmckusick  */
3091537129bSmckusick void
pmap_init(phys_start,phys_end)3101537129bSmckusick pmap_init(phys_start, phys_end)
3111537129bSmckusick 	vm_offset_t	phys_start, phys_end;
3121537129bSmckusick {
3131537129bSmckusick 	vm_offset_t	addr, addr2;
3141537129bSmckusick 	vm_size_t	npg, s;
3151537129bSmckusick 	int		rv;
316765963daSkarels 	extern char kstack[];
3171537129bSmckusick 
3181537129bSmckusick #ifdef DEBUG
3191537129bSmckusick 	if (pmapdebug & PDB_FOLLOW)
3201537129bSmckusick 		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
3211537129bSmckusick #endif
3221537129bSmckusick 	/*
3231537129bSmckusick 	 * Now that kernel map has been allocated, we can mark as
3241537129bSmckusick 	 * unavailable regions which we have mapped in locore.
3251537129bSmckusick 	 */
3269c60e975Shibler 	addr = (vm_offset_t) intiobase;
327d45eccefSkarels 	(void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
3289c60e975Shibler 			   &addr, hp300_ptob(IIOMAPSIZE+EIOMAPSIZE), FALSE);
3299c60e975Shibler 	if (addr != (vm_offset_t)intiobase)
3301537129bSmckusick 		goto bogons;
3311537129bSmckusick 	addr = (vm_offset_t) Sysmap;
3321537129bSmckusick 	vm_object_reference(kernel_object);
3331537129bSmckusick 	(void) vm_map_find(kernel_map, kernel_object, addr,
3341537129bSmckusick 			   &addr, HP_MAX_PTSIZE, FALSE);
3351537129bSmckusick 	/*
3361537129bSmckusick 	 * If this fails it is probably because the static portion of
3371537129bSmckusick 	 * the kernel page table isn't big enough and we overran the
3381537129bSmckusick 	 * page table map.   Need to adjust pmap_size() in hp300_init.c.
3391537129bSmckusick 	 */
3401537129bSmckusick 	if (addr != (vm_offset_t)Sysmap)
3411537129bSmckusick 		goto bogons;
3421537129bSmckusick 
343765963daSkarels 	addr = (vm_offset_t) kstack;
3441537129bSmckusick 	vm_object_reference(kernel_object);
3451537129bSmckusick 	(void) vm_map_find(kernel_map, kernel_object, addr,
3461537129bSmckusick 			   &addr, hp300_ptob(UPAGES), FALSE);
347765963daSkarels 	if (addr != (vm_offset_t)kstack)
3481537129bSmckusick bogons:
3491537129bSmckusick 		panic("pmap_init: bogons in the VM system!\n");
3501537129bSmckusick 
3511537129bSmckusick #ifdef DEBUG
3521537129bSmckusick 	if (pmapdebug & PDB_INIT) {
3531537129bSmckusick 		printf("pmap_init: Sysseg %x, Sysmap %x, Sysptmap %x\n",
3541537129bSmckusick 		       Sysseg, Sysmap, Sysptmap);
3551537129bSmckusick 		printf("  pstart %x, pend %x, vstart %x, vend %x\n",
3561537129bSmckusick 		       avail_start, avail_end, virtual_avail, virtual_end);
3571537129bSmckusick 	}
3581537129bSmckusick #endif
3591537129bSmckusick 
3601537129bSmckusick 	/*
3611537129bSmckusick 	 * Allocate memory for random pmap data structures.  Includes the
3621537129bSmckusick 	 * initial segment table, pv_head_table and pmap_attributes.
3631537129bSmckusick 	 */
3641537129bSmckusick 	npg = atop(phys_end - phys_start);
3651537129bSmckusick 	s = (vm_size_t) (HP_STSIZE + sizeof(struct pv_entry) * npg + npg);
3661537129bSmckusick 	s = round_page(s);
3671537129bSmckusick 	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
3681537129bSmckusick 	Segtabzero = (st_entry_t *) addr;
36914b153a2Shibler 	Segtabzeropa = (st_entry_t *) pmap_extract(kernel_pmap, addr);
3701537129bSmckusick 	addr += HP_STSIZE;
3711537129bSmckusick 	pv_table = (pv_entry_t) addr;
3721537129bSmckusick 	addr += sizeof(struct pv_entry) * npg;
3731537129bSmckusick 	pmap_attributes = (char *) addr;
3741537129bSmckusick #ifdef DEBUG
3751537129bSmckusick 	if (pmapdebug & PDB_INIT)
37614b153a2Shibler 		printf("pmap_init: %x bytes: npg %x s0 %x(%x) tbl %x atr %x\n",
37714b153a2Shibler 		       s, npg, Segtabzero, Segtabzeropa,
37814b153a2Shibler 		       pv_table, pmap_attributes);
3791537129bSmckusick #endif
3801537129bSmckusick 
3811537129bSmckusick 	/*
3821537129bSmckusick 	 * Allocate physical memory for kernel PT pages and their management.
3831537129bSmckusick 	 * We need 1 PT page per possible task plus some slop.
3841537129bSmckusick 	 */
385d45eccefSkarels 	npg = min(atop(HP_MAX_KPTSIZE), maxproc+16);
3861537129bSmckusick 	s = ptoa(npg) + round_page(npg * sizeof(struct kpt_page));
3871537129bSmckusick 
3881537129bSmckusick 	/*
3891537129bSmckusick 	 * Verify that space will be allocated in region for which
3901537129bSmckusick 	 * we already have kernel PT pages.
3911537129bSmckusick 	 */
3921537129bSmckusick 	addr = 0;
393d45eccefSkarels 	rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
3941537129bSmckusick 	if (rv != KERN_SUCCESS || addr + s >= (vm_offset_t)Sysmap)
3951537129bSmckusick 		panic("pmap_init: kernel PT too small");
3961537129bSmckusick 	vm_map_remove(kernel_map, addr, addr + s);
3971537129bSmckusick 
3981537129bSmckusick 	/*
3991537129bSmckusick 	 * Now allocate the space and link the pages together to
4001537129bSmckusick 	 * form the KPT free list.
4011537129bSmckusick 	 */
4021537129bSmckusick 	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
4031537129bSmckusick 	s = ptoa(npg);
4041537129bSmckusick 	addr2 = addr + s;
4051537129bSmckusick 	kpt_pages = &((struct kpt_page *)addr2)[npg];
4061537129bSmckusick 	kpt_free_list = (struct kpt_page *) 0;
4071537129bSmckusick 	do {
4081537129bSmckusick 		addr2 -= HP_PAGE_SIZE;
4091537129bSmckusick 		(--kpt_pages)->kpt_next = kpt_free_list;
4101537129bSmckusick 		kpt_free_list = kpt_pages;
4111537129bSmckusick 		kpt_pages->kpt_va = addr2;
4121537129bSmckusick 		kpt_pages->kpt_pa = pmap_extract(kernel_pmap, addr2);
4131537129bSmckusick 	} while (addr != addr2);
41424eb1037Shibler #ifdef PMAPSTATS
4151537129bSmckusick 	kpt_stats.kpttotal = atop(s);
41624eb1037Shibler #endif
41724eb1037Shibler #ifdef DEBUG
4181537129bSmckusick 	if (pmapdebug & PDB_INIT)
4191537129bSmckusick 		printf("pmap_init: KPT: %d pages from %x to %x\n",
4201537129bSmckusick 		       atop(s), addr, addr + s);
4211537129bSmckusick #endif
4221537129bSmckusick 
4231537129bSmckusick 	/*
42422ac56c6Shibler 	 * Allocate the segment table map
42522ac56c6Shibler 	 */
42622ac56c6Shibler 	s = maxproc * HP_STSIZE;
42722ac56c6Shibler 	st_map = kmem_suballoc(kernel_map, &addr, &addr2, s, TRUE);
42822ac56c6Shibler 
42922ac56c6Shibler 	/*
4301537129bSmckusick 	 * Slightly modified version of kmem_suballoc() to get page table
4311537129bSmckusick 	 * map where we want it.
4321537129bSmckusick 	 */
4331537129bSmckusick 	addr = HP_PTBASE;
434d45eccefSkarels 	s = min(HP_PTMAXSIZE, maxproc*HP_MAX_PTSIZE);
4351537129bSmckusick 	addr2 = addr + s;
436d45eccefSkarels 	rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
4371537129bSmckusick 	if (rv != KERN_SUCCESS)
4381537129bSmckusick 		panic("pmap_init: cannot allocate space for PT map");
4391537129bSmckusick 	pmap_reference(vm_map_pmap(kernel_map));
4401537129bSmckusick 	pt_map = vm_map_create(vm_map_pmap(kernel_map), addr, addr2, TRUE);
441d45eccefSkarels 	if (pt_map == NULL)
4421537129bSmckusick 		panic("pmap_init: cannot create pt_map");
4431537129bSmckusick 	rv = vm_map_submap(kernel_map, addr, addr2, pt_map);
4441537129bSmckusick 	if (rv != KERN_SUCCESS)
4451537129bSmckusick 		panic("pmap_init: cannot map range to pt_map");
4461537129bSmckusick #ifdef DEBUG
4471537129bSmckusick 	if (pmapdebug & PDB_INIT)
4481537129bSmckusick 		printf("pmap_init: pt_map [%x - %x)\n", addr, addr2);
4491537129bSmckusick #endif
4501537129bSmckusick 
45114b153a2Shibler #if defined(HP380)
45214b153a2Shibler 	if (mmutype == MMU_68040) {
45314b153a2Shibler 		protostfree = ~l2tobm(0);
45414b153a2Shibler 		for (rv = MAXUL2SIZE; rv < sizeof(protostfree)*NBBY; rv++)
45514b153a2Shibler 			protostfree &= ~l2tobm(rv);
45614b153a2Shibler 	}
45714b153a2Shibler #endif
45814b153a2Shibler 
4591537129bSmckusick 	/*
4601537129bSmckusick 	 * Now it is safe to enable pv_table recording.
4611537129bSmckusick 	 */
4621537129bSmckusick 	vm_first_phys = phys_start;
4631537129bSmckusick 	vm_last_phys = phys_end;
4641537129bSmckusick 	pmap_initialized = TRUE;
4651537129bSmckusick }
4661537129bSmckusick 
4671537129bSmckusick /*
4681537129bSmckusick  *	Used to map a range of physical addresses into kernel
4691537129bSmckusick  *	virtual address space.
4701537129bSmckusick  *
4711537129bSmckusick  *	For now, VM is already on, we only need to map the
4721537129bSmckusick  *	specified memory.
4731537129bSmckusick  */
4741537129bSmckusick vm_offset_t
pmap_map(virt,start,end,prot)4751537129bSmckusick pmap_map(virt, start, end, prot)
4761537129bSmckusick 	vm_offset_t	virt;
4771537129bSmckusick 	vm_offset_t	start;
4781537129bSmckusick 	vm_offset_t	end;
4791537129bSmckusick 	int		prot;
4801537129bSmckusick {
4811537129bSmckusick #ifdef DEBUG
4821537129bSmckusick 	if (pmapdebug & PDB_FOLLOW)
4831537129bSmckusick 		printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
4841537129bSmckusick #endif
4851537129bSmckusick 	while (start < end) {
4861537129bSmckusick 		pmap_enter(kernel_pmap, virt, start, prot, FALSE);
4871537129bSmckusick 		virt += PAGE_SIZE;
4881537129bSmckusick 		start += PAGE_SIZE;
4891537129bSmckusick 	}
4901537129bSmckusick 	return(virt);
4911537129bSmckusick }
4921537129bSmckusick 
4931537129bSmckusick /*
4941537129bSmckusick  *	Create and return a physical map.
4951537129bSmckusick  *
4961537129bSmckusick  *	If the size specified for the map
4971537129bSmckusick  *	is zero, the map is an actual physical
4981537129bSmckusick  *	map, and may be referenced by the
4991537129bSmckusick  *	hardware.
5001537129bSmckusick  *
5011537129bSmckusick  *	If the size specified is non-zero,
5021537129bSmckusick  *	the map will be used in software only, and
5031537129bSmckusick  *	is bounded by that size.
5041537129bSmckusick  */
5051537129bSmckusick pmap_t
pmap_create(size)5061537129bSmckusick pmap_create(size)
5071537129bSmckusick 	vm_size_t	size;
5081537129bSmckusick {
5091537129bSmckusick 	register pmap_t pmap;
5101537129bSmckusick 
5111537129bSmckusick #ifdef DEBUG
5121537129bSmckusick 	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
5131537129bSmckusick 		printf("pmap_create(%x)\n", size);
5141537129bSmckusick #endif
5151537129bSmckusick 	/*
5161537129bSmckusick 	 * Software use map does not need a pmap
5171537129bSmckusick 	 */
5181537129bSmckusick 	if (size)
519d45eccefSkarels 		return(NULL);
5201537129bSmckusick 
5211537129bSmckusick 	/* XXX: is it ok to wait here? */
5221537129bSmckusick 	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
523d45eccefSkarels #ifdef notifwewait
524d45eccefSkarels 	if (pmap == NULL)
5251537129bSmckusick 		panic("pmap_create: cannot allocate a pmap");
526d45eccefSkarels #endif
527d45eccefSkarels 	bzero(pmap, sizeof(*pmap));
528d45eccefSkarels 	pmap_pinit(pmap);
529d45eccefSkarels 	return (pmap);
530d45eccefSkarels }
5311537129bSmckusick 
5321537129bSmckusick /*
533d45eccefSkarels  * Initialize a preallocated and zeroed pmap structure,
534d45eccefSkarels  * such as one in a vmspace structure.
535d45eccefSkarels  */
536d45eccefSkarels void
pmap_pinit(pmap)537d45eccefSkarels pmap_pinit(pmap)
538d45eccefSkarels 	register struct pmap *pmap;
539d45eccefSkarels {
540d45eccefSkarels 
541d45eccefSkarels #ifdef DEBUG
542d45eccefSkarels 	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
543d45eccefSkarels 		printf("pmap_pinit(%x)\n", pmap);
544d45eccefSkarels #endif
545d45eccefSkarels 	/*
5461537129bSmckusick 	 * No need to allocate page table space yet but we do need a
5471537129bSmckusick 	 * valid segment table.  Initially, we point everyone at the
5481537129bSmckusick 	 * "null" segment table.  On the first pmap_enter, a real
5491537129bSmckusick 	 * segment table will be allocated.
5501537129bSmckusick 	 */
5511537129bSmckusick 	pmap->pm_stab = Segtabzero;
55214b153a2Shibler 	pmap->pm_stpa = Segtabzeropa;
55314b153a2Shibler #if defined(HP380)
55414b153a2Shibler 	if (mmutype == MMU_68040)
55514b153a2Shibler 		pmap->pm_stfree = protostfree;
55614b153a2Shibler #endif
5571537129bSmckusick 	pmap->pm_stchanged = TRUE;
5581537129bSmckusick 	pmap->pm_count = 1;
5591537129bSmckusick 	simple_lock_init(&pmap->pm_lock);
5601537129bSmckusick }
5611537129bSmckusick 
5621537129bSmckusick /*
5631537129bSmckusick  *	Retire the given physical map from service.
5641537129bSmckusick  *	Should only be called if the map contains
5651537129bSmckusick  *	no valid mappings.
5661537129bSmckusick  */
5671537129bSmckusick void
pmap_destroy(pmap)5681537129bSmckusick pmap_destroy(pmap)
5691537129bSmckusick 	register pmap_t pmap;
5701537129bSmckusick {
5711537129bSmckusick 	int count;
5721537129bSmckusick 
5731537129bSmckusick #ifdef DEBUG
5741537129bSmckusick 	if (pmapdebug & PDB_FOLLOW)
5751537129bSmckusick 		printf("pmap_destroy(%x)\n", pmap);
5761537129bSmckusick #endif
577d45eccefSkarels 	if (pmap == NULL)
5781537129bSmckusick 		return;
5791537129bSmckusick 
5801537129bSmckusick 	simple_lock(&pmap->pm_lock);
5811537129bSmckusick 	count = --pmap->pm_count;
5821537129bSmckusick 	simple_unlock(&pmap->pm_lock);
583d45eccefSkarels 	if (count == 0) {
584d45eccefSkarels 		pmap_release(pmap);
585d45eccefSkarels 		free((caddr_t)pmap, M_VMPMAP);
586d45eccefSkarels 	}
587d45eccefSkarels }
5881537129bSmckusick 
589d45eccefSkarels /*
590d45eccefSkarels  * Release any resources held by the given physical map.
591d45eccefSkarels  * Called when a pmap initialized by pmap_pinit is being released.
592d45eccefSkarels  * Should only be called if the map contains no valid mappings.
593d45eccefSkarels  */
594d45eccefSkarels void
pmap_release(pmap)595d45eccefSkarels pmap_release(pmap)
596d45eccefSkarels 	register struct pmap *pmap;
597d45eccefSkarels {
598d45eccefSkarels 
599d45eccefSkarels #ifdef DEBUG
600d45eccefSkarels 	if (pmapdebug & PDB_FOLLOW)
601d45eccefSkarels 		printf("pmap_release(%x)\n", pmap);
602d45eccefSkarels #endif
603d45eccefSkarels #ifdef notdef /* DIAGNOSTIC */
604d45eccefSkarels 	/* count would be 0 from pmap_destroy... */
605d45eccefSkarels 	simple_lock(&pmap->pm_lock);
606d45eccefSkarels 	if (pmap->pm_count != 1)
607d45eccefSkarels 		panic("pmap_release count");
608d45eccefSkarels #endif
6091537129bSmckusick 	if (pmap->pm_ptab)
6101537129bSmckusick 		kmem_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
6111537129bSmckusick 				 HP_MAX_PTSIZE);
6121537129bSmckusick 	if (pmap->pm_stab != Segtabzero)
61322ac56c6Shibler 		kmem_free_wakeup(st_map, (vm_offset_t)pmap->pm_stab,
61422ac56c6Shibler 				 HP_STSIZE);
6151537129bSmckusick }
6161537129bSmckusick 
6171537129bSmckusick /*
6181537129bSmckusick  *	Add a reference to the specified pmap.
6191537129bSmckusick  */
6201537129bSmckusick void
pmap_reference(pmap)6211537129bSmckusick pmap_reference(pmap)
6221537129bSmckusick 	pmap_t	pmap;
6231537129bSmckusick {
6241537129bSmckusick #ifdef DEBUG
6251537129bSmckusick 	if (pmapdebug & PDB_FOLLOW)
6261537129bSmckusick 		printf("pmap_reference(%x)\n", pmap);
6271537129bSmckusick #endif
628d45eccefSkarels 	if (pmap != NULL) {
6291537129bSmckusick 		simple_lock(&pmap->pm_lock);
6301537129bSmckusick 		pmap->pm_count++;
6311537129bSmckusick 		simple_unlock(&pmap->pm_lock);
6321537129bSmckusick 	}
6331537129bSmckusick }
6341537129bSmckusick 
6351537129bSmckusick /*
6361537129bSmckusick  *	Remove the given range of addresses from the specified map.
6371537129bSmckusick  *
6381537129bSmckusick  *	It is assumed that the start and end are properly
6391537129bSmckusick  *	rounded to the page size.
6401537129bSmckusick  */
6411537129bSmckusick void
pmap_remove(pmap,sva,eva)6421537129bSmckusick pmap_remove(pmap, sva, eva)
6431537129bSmckusick 	register pmap_t pmap;
64424eb1037Shibler 	register vm_offset_t sva, eva;
6451537129bSmckusick {
64624eb1037Shibler 	register vm_offset_t nssva;
6471537129bSmckusick 	register pt_entry_t *pte;
64824eb1037Shibler 	boolean_t firstpage, needcflush;
64924eb1037Shibler 	int flags;
6501537129bSmckusick 
65124eb1037Shibler #ifdef DEBUG
6521537129bSmckusick 	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
6531537129bSmckusick 		printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
6541537129bSmckusick #endif
6551537129bSmckusick 
656d45eccefSkarels 	if (pmap == NULL)
6571537129bSmckusick 		return;
6581537129bSmckusick 
65924eb1037Shibler #ifdef PMAPSTATS
6601537129bSmckusick 	remove_stats.calls++;
6611537129bSmckusick #endif
66224eb1037Shibler 	firstpage = TRUE;
66324eb1037Shibler 	needcflush = FALSE;
66424eb1037Shibler 	flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
66524eb1037Shibler 	while (sva < eva) {
66624eb1037Shibler 		nssva = hp300_trunc_seg(sva) + HP_SEG_SIZE;
66724eb1037Shibler 		if (nssva == 0 || nssva > eva)
66824eb1037Shibler 			nssva = eva;
6691537129bSmckusick 		/*
67024eb1037Shibler 		 * If VA belongs to an unallocated segment,
67124eb1037Shibler 		 * skip to the next segment boundary.
6721537129bSmckusick 		 */
67324eb1037Shibler 		if (!pmap_ste_v(pmap, sva)) {
67424eb1037Shibler 			sva = nssva;
6751537129bSmckusick 			continue;
6761537129bSmckusick 		}
6771537129bSmckusick 		/*
67824eb1037Shibler 		 * Invalidate every valid mapping within this segment.
6791537129bSmckusick 		 */
68024eb1037Shibler 		pte = pmap_pte(pmap, sva);
68124eb1037Shibler 		while (sva < nssva) {
68224eb1037Shibler 			if (pmap_pte_v(pte)) {
68324eb1037Shibler #ifdef HAVEVAC
68424eb1037Shibler 				if (pmap_aliasmask) {
6851537129bSmckusick 					/*
68624eb1037Shibler 					 * Purge kernel side of VAC to ensure
68724eb1037Shibler 					 * we get the correct state of any
6881537129bSmckusick 					 * hardware maintained bits.
6891537129bSmckusick 					 */
69024eb1037Shibler 					if (firstpage) {
6911537129bSmckusick 						DCIS();
69224eb1037Shibler #ifdef PMAPSTATS
6931537129bSmckusick 						remove_stats.sflushes++;
6941537129bSmckusick #endif
6951537129bSmckusick 					}
69624eb1037Shibler 					/*
69724eb1037Shibler 					 * Remember if we may need to
69824eb1037Shibler 					 * flush the VAC due to a non-CI
69924eb1037Shibler 					 * mapping.
70024eb1037Shibler 					 */
70124eb1037Shibler 					if (!needcflush && !pmap_pte_ci(pte))
70224eb1037Shibler 						needcflush = TRUE;
70314b153a2Shibler 
70414b153a2Shibler 				}
70514b153a2Shibler #endif
70624eb1037Shibler 				pmap_remove_mapping(pmap, sva, pte, flags);
70724eb1037Shibler 				firstpage = FALSE;
70824eb1037Shibler 			}
70924eb1037Shibler 			pte++;
71024eb1037Shibler 			sva += PAGE_SIZE;
71124eb1037Shibler 		}
7121537129bSmckusick 	}
7131537129bSmckusick 	/*
71424eb1037Shibler 	 * Didn't do anything, no need for cache flushes
7151537129bSmckusick 	 */
71624eb1037Shibler 	if (firstpage)
71724eb1037Shibler 		return;
71824eb1037Shibler #ifdef HAVEVAC
7191537129bSmckusick 	/*
72024eb1037Shibler 	 * In a couple of cases, we don't need to worry about flushing
72124eb1037Shibler 	 * the VAC:
72224eb1037Shibler 	 * 	1. if this is a kernel mapping,
72324eb1037Shibler 	 *	   we have already done it
72424eb1037Shibler 	 *	2. if it is a user mapping not for the current process,
72524eb1037Shibler 	 *	   it won't be there
7261537129bSmckusick 	 */
72724eb1037Shibler 	if (pmap_aliasmask &&
72824eb1037Shibler 	    (pmap == kernel_pmap || pmap != curproc->p_vmspace->vm_map.pmap))
72924eb1037Shibler 		needcflush = FALSE;
7301537129bSmckusick #ifdef DEBUG
73124eb1037Shibler 	if (pmap_aliasmask && (pmapvacflush & PVF_REMOVE)) {
7321537129bSmckusick 		if (pmapvacflush & PVF_TOTAL)
7331537129bSmckusick 			DCIA();
7341537129bSmckusick 		else if (pmap == kernel_pmap)
7351537129bSmckusick 			DCIS();
7361537129bSmckusick 		else
7371537129bSmckusick 			DCIU();
73824eb1037Shibler 	} else
7391537129bSmckusick #endif
74024eb1037Shibler 	if (needcflush) {
7411537129bSmckusick 		if (pmap == kernel_pmap) {
7421537129bSmckusick 			DCIS();
74324eb1037Shibler #ifdef PMAPSTATS
7441537129bSmckusick 			remove_stats.sflushes++;
7451537129bSmckusick #endif
7461537129bSmckusick 		} else {
7471537129bSmckusick 			DCIU();
74824eb1037Shibler #ifdef PMAPSTATS
7491537129bSmckusick 			remove_stats.uflushes++;
7501537129bSmckusick #endif
7511537129bSmckusick 		}
7521537129bSmckusick 	}
75324eb1037Shibler #endif
7541537129bSmckusick }
7551537129bSmckusick 
7561537129bSmckusick /*
7579c60e975Shibler  *	pmap_page_protect:
7589c60e975Shibler  *
7599c60e975Shibler  *	Lower the permission for all mappings to a given page.
7601537129bSmckusick  */
7611537129bSmckusick void
pmap_page_protect(pa,prot)7629c60e975Shibler pmap_page_protect(pa, prot)
7631537129bSmckusick 	vm_offset_t	pa;
7649c60e975Shibler 	vm_prot_t	prot;
7651537129bSmckusick {
7661537129bSmckusick 	register pv_entry_t pv;
7671537129bSmckusick 	int s;
7681537129bSmckusick 
7691537129bSmckusick #ifdef DEBUG
7709c60e975Shibler 	if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
7719c60e975Shibler 	    prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))
7729c60e975Shibler 		printf("pmap_page_protect(%x, %x)\n", pa, prot);
7731537129bSmckusick #endif
7741537129bSmckusick 	if (pa < vm_first_phys || pa >= vm_last_phys)
7751537129bSmckusick 		return;
7761537129bSmckusick 
7779c60e975Shibler 	switch (prot) {
77824eb1037Shibler 	case VM_PROT_READ|VM_PROT_WRITE:
7799c60e975Shibler 	case VM_PROT_ALL:
7801d2d1332Shibler 		return;
7819c60e975Shibler 	/* copy_on_write */
7829c60e975Shibler 	case VM_PROT_READ:
7839c60e975Shibler 	case VM_PROT_READ|VM_PROT_EXECUTE:
7849c60e975Shibler 		pmap_changebit(pa, PG_RO, TRUE);
7851d2d1332Shibler 		return;
7869c60e975Shibler 	/* remove_all */
7879c60e975Shibler 	default:
7881d2d1332Shibler 		break;
7891d2d1332Shibler 	}
7901537129bSmckusick 	pv = pa_to_pvh(pa);
7911537129bSmckusick 	s = splimp();
792d45eccefSkarels 	while (pv->pv_pmap != NULL) {
7931d2d1332Shibler 		register pt_entry_t *pte;
7941d2d1332Shibler 
7951d2d1332Shibler 		pte = pmap_pte(pv->pv_pmap, pv->pv_va);
7961537129bSmckusick #ifdef DEBUG
79714b153a2Shibler 		if (!pmap_ste_v(pv->pv_pmap, pv->pv_va) ||
7981d2d1332Shibler 		    pmap_pte_pa(pte) != pa)
7999c60e975Shibler 			panic("pmap_page_protect: bad mapping");
8001537129bSmckusick #endif
8011d2d1332Shibler 		if (!pmap_pte_w(pte))
80224eb1037Shibler 			pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
8031d2d1332Shibler 					    pte, PRM_TFLUSH|PRM_CFLUSH);
8041d2d1332Shibler 		else {
8051d2d1332Shibler 			pv = pv->pv_next;
8061d2d1332Shibler #ifdef DEBUG
8071d2d1332Shibler 			if (pmapdebug & PDB_PARANOIA)
8081d2d1332Shibler 				printf("%s wired mapping for %x not removed\n",
8091d2d1332Shibler 				       "pmap_page_protect:", pa);
8101d2d1332Shibler #endif
8111d2d1332Shibler 		}
8121537129bSmckusick 	}
8131537129bSmckusick 	splx(s);
8141537129bSmckusick }
8151537129bSmckusick 
8161537129bSmckusick /*
8171537129bSmckusick  *	Set the physical protection on the
8181537129bSmckusick  *	specified range of this map as requested.
8191537129bSmckusick  */
8201537129bSmckusick void
pmap_protect(pmap,sva,eva,prot)8211537129bSmckusick pmap_protect(pmap, sva, eva, prot)
8221537129bSmckusick 	register pmap_t	pmap;
82324eb1037Shibler 	register vm_offset_t sva, eva;
8241537129bSmckusick 	vm_prot_t prot;
8251537129bSmckusick {
82624eb1037Shibler 	register vm_offset_t nssva;
8271537129bSmckusick 	register pt_entry_t *pte;
82824eb1037Shibler 	boolean_t firstpage, needtflush;
82924eb1037Shibler 	int isro;
8301537129bSmckusick 
8311537129bSmckusick #ifdef DEBUG
8321537129bSmckusick 	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
8331537129bSmckusick 		printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
8341537129bSmckusick #endif
83524eb1037Shibler 
836d45eccefSkarels 	if (pmap == NULL)
8371537129bSmckusick 		return;
8381537129bSmckusick 
83924eb1037Shibler #ifdef PMAPSTATS
84024eb1037Shibler 	protect_stats.calls++;
84124eb1037Shibler #endif
8421537129bSmckusick 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
8431537129bSmckusick 		pmap_remove(pmap, sva, eva);
8441537129bSmckusick 		return;
8451537129bSmckusick 	}
8461537129bSmckusick 	if (prot & VM_PROT_WRITE)
8471537129bSmckusick 		return;
8481537129bSmckusick 
84924eb1037Shibler 	isro = pte_prot(pmap, prot);
85024eb1037Shibler 	needtflush = active_pmap(pmap);
85124eb1037Shibler 	firstpage = TRUE;
85224eb1037Shibler 	while (sva < eva) {
85324eb1037Shibler 		nssva = hp300_trunc_seg(sva) + HP_SEG_SIZE;
85424eb1037Shibler 		if (nssva == 0 || nssva > eva)
85524eb1037Shibler 			nssva = eva;
85624eb1037Shibler 		/*
85724eb1037Shibler 		 * If VA belongs to an unallocated segment,
85824eb1037Shibler 		 * skip to the next segment boundary.
85924eb1037Shibler 		 */
86024eb1037Shibler 		if (!pmap_ste_v(pmap, sva)) {
86124eb1037Shibler 			sva = nssva;
86224eb1037Shibler 			continue;
86324eb1037Shibler 		}
86424eb1037Shibler 		/*
86524eb1037Shibler 		 * Change protection on mapping if it is valid and doesn't
86624eb1037Shibler 		 * already have the correct protection.
86724eb1037Shibler 		 */
8681537129bSmckusick 		pte = pmap_pte(pmap, sva);
86924eb1037Shibler 		while (sva < nssva) {
87024eb1037Shibler 			if (pmap_pte_v(pte) && pmap_pte_prot_chg(pte, isro)) {
87124eb1037Shibler #ifdef HAVEVAC
8721537129bSmckusick 				/*
87324eb1037Shibler 				 * Purge kernel side of VAC to ensure we
87424eb1037Shibler 				 * get the correct state of any hardware
87524eb1037Shibler 				 * maintained bits.
87624eb1037Shibler 				 *
87724eb1037Shibler 				 * XXX do we need to clear the VAC in
87824eb1037Shibler 				 * general to reflect the new protection?
8791537129bSmckusick 				 */
88024eb1037Shibler 				if (firstpage && pmap_aliasmask)
8811537129bSmckusick 					DCIS();
88224eb1037Shibler #endif
88314b153a2Shibler #if defined(HP380)
88424eb1037Shibler 				/*
88524eb1037Shibler 				 * Clear caches if making RO (see section
88624eb1037Shibler 				 * "7.3 Cache Coherency" in the manual).
88724eb1037Shibler 				 */
88824eb1037Shibler 				if (isro && mmutype == MMU_68040) {
88914b153a2Shibler 					vm_offset_t pa = pmap_pte_pa(pte);
89014b153a2Shibler 
89114b153a2Shibler 					DCFP(pa);
89214b153a2Shibler 					ICPP(pa);
89314b153a2Shibler 				}
89414b153a2Shibler #endif
89524eb1037Shibler 				pmap_pte_set_prot(pte, isro);
89624eb1037Shibler 				if (needtflush)
89724eb1037Shibler 					TBIS(sva);
89824eb1037Shibler #ifdef PMAPSTATS
89924eb1037Shibler 				protect_stats.changed++;
90024eb1037Shibler #endif
90124eb1037Shibler 				firstpage = FALSE;
90224eb1037Shibler 			}
90324eb1037Shibler #ifdef PMAPSTATS
90424eb1037Shibler 			else if (pmap_pte_v(pte)) {
90524eb1037Shibler 				if (isro)
90614b153a2Shibler 					protect_stats.alreadyro++;
90724eb1037Shibler 				else
90814b153a2Shibler 					protect_stats.alreadyrw++;
90914b153a2Shibler 			}
91014b153a2Shibler #endif
91124eb1037Shibler 			pte++;
91224eb1037Shibler 			sva += PAGE_SIZE;
91314b153a2Shibler 		}
91424eb1037Shibler 	}
91524eb1037Shibler #if defined(HAVEVAC) && defined(DEBUG)
91624eb1037Shibler 	if (pmap_aliasmask && (pmapvacflush & PVF_PROTECT)) {
9171537129bSmckusick 		if (pmapvacflush & PVF_TOTAL)
9181537129bSmckusick 			DCIA();
9191537129bSmckusick 		else if (pmap == kernel_pmap)
9201537129bSmckusick 			DCIS();
9211537129bSmckusick 		else
9221537129bSmckusick 			DCIU();
9231537129bSmckusick 	}
9241537129bSmckusick #endif
9251537129bSmckusick }
9261537129bSmckusick 
9271537129bSmckusick /*
9281537129bSmckusick  *	Insert the given physical page (p) at
9291537129bSmckusick  *	the specified virtual address (v) in the
9301537129bSmckusick  *	target physical map with the protection requested.
9311537129bSmckusick  *
9321537129bSmckusick  *	If specified, the page will be wired down, meaning
9331537129bSmckusick  *	that the related pte can not be reclaimed.
9341537129bSmckusick  *
9351537129bSmckusick  *	NB:  This is the only routine which MAY NOT lazy-evaluate
9361537129bSmckusick  *	or lose information.  That is, this routine must actually
9371537129bSmckusick  *	insert this page into the given map NOW.
9381537129bSmckusick  */
9391537129bSmckusick void
pmap_enter(pmap,va,pa,prot,wired)9401537129bSmckusick pmap_enter(pmap, va, pa, prot, wired)
9411537129bSmckusick 	register pmap_t pmap;
9421537129bSmckusick 	vm_offset_t va;
9431537129bSmckusick 	register vm_offset_t pa;
9441537129bSmckusick 	vm_prot_t prot;
9451537129bSmckusick 	boolean_t wired;
9461537129bSmckusick {
9471537129bSmckusick 	register pt_entry_t *pte;
94814b153a2Shibler 	register int npte;
9491537129bSmckusick 	vm_offset_t opa;
9501537129bSmckusick 	boolean_t cacheable = TRUE;
9511537129bSmckusick 	boolean_t checkpv = TRUE;
9521537129bSmckusick 
9531537129bSmckusick #ifdef DEBUG
9541537129bSmckusick 	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
9551537129bSmckusick 		printf("pmap_enter(%x, %x, %x, %x, %x)\n",
9561537129bSmckusick 		       pmap, va, pa, prot, wired);
9571537129bSmckusick #endif
958d45eccefSkarels 	if (pmap == NULL)
9591537129bSmckusick 		return;
9601537129bSmckusick 
96124eb1037Shibler #ifdef PMAPSTATS
9621537129bSmckusick 	if (pmap == kernel_pmap)
9631537129bSmckusick 		enter_stats.kernel++;
9641537129bSmckusick 	else
9651537129bSmckusick 		enter_stats.user++;
9661537129bSmckusick #endif
9671537129bSmckusick 	/*
9681537129bSmckusick 	 * For user mapping, allocate kernel VM resources if necessary.
9691537129bSmckusick 	 */
970d45eccefSkarels 	if (pmap->pm_ptab == NULL)
9711537129bSmckusick 		pmap->pm_ptab = (pt_entry_t *)
9721537129bSmckusick 			kmem_alloc_wait(pt_map, HP_MAX_PTSIZE);
9731537129bSmckusick 
9741537129bSmckusick 	/*
9751537129bSmckusick 	 * Segment table entry not valid, we need a new PT page
9761537129bSmckusick 	 */
97714b153a2Shibler 	if (!pmap_ste_v(pmap, va))
9781537129bSmckusick 		pmap_enter_ptpage(pmap, va);
9791537129bSmckusick 
98024eb1037Shibler 	pa = hp300_trunc_page(pa);
9811537129bSmckusick 	pte = pmap_pte(pmap, va);
9821537129bSmckusick 	opa = pmap_pte_pa(pte);
9831537129bSmckusick #ifdef DEBUG
9841537129bSmckusick 	if (pmapdebug & PDB_ENTER)
9851537129bSmckusick 		printf("enter: pte %x, *pte %x\n", pte, *(int *)pte);
9861537129bSmckusick #endif
9871537129bSmckusick 
9881537129bSmckusick 	/*
9891537129bSmckusick 	 * Mapping has not changed, must be protection or wiring change.
9901537129bSmckusick 	 */
9911537129bSmckusick 	if (opa == pa) {
99224eb1037Shibler #ifdef PMAPSTATS
9931537129bSmckusick 		enter_stats.pwchange++;
9941537129bSmckusick #endif
9951537129bSmckusick 		/*
9961537129bSmckusick 		 * Wiring change, just update stats.
9971537129bSmckusick 		 * We don't worry about wiring PT pages as they remain
9981537129bSmckusick 		 * resident as long as there are valid mappings in them.
9991537129bSmckusick 		 * Hence, if a user page is wired, the PT page will be also.
10001537129bSmckusick 		 */
100124eb1037Shibler 		if (pmap_pte_w_chg(pte, wired ? PG_W : 0)) {
10021537129bSmckusick #ifdef DEBUG
10031537129bSmckusick 			if (pmapdebug & PDB_ENTER)
10041537129bSmckusick 				printf("enter: wiring change -> %x\n", wired);
10051537129bSmckusick #endif
10061537129bSmckusick 			if (wired)
10071537129bSmckusick 				pmap->pm_stats.wired_count++;
10081537129bSmckusick 			else
10091537129bSmckusick 				pmap->pm_stats.wired_count--;
101024eb1037Shibler #ifdef PMAPSTATS
101124eb1037Shibler 			if (pmap_pte_prot(pte) == pte_prot(pmap, prot))
10121537129bSmckusick 				enter_stats.wchange++;
10131537129bSmckusick #endif
10141537129bSmckusick 		}
101524eb1037Shibler #ifdef PMAPSTATS
101624eb1037Shibler 		else if (pmap_pte_prot(pte) != pte_prot(pmap, prot))
101724eb1037Shibler 			enter_stats.pchange++;
101824eb1037Shibler 		else
101924eb1037Shibler 			enter_stats.nochange++;
102024eb1037Shibler #endif
10211537129bSmckusick 		/*
10221537129bSmckusick 		 * Retain cache inhibition status
10231537129bSmckusick 		 */
10241537129bSmckusick 		checkpv = FALSE;
10251537129bSmckusick 		if (pmap_pte_ci(pte))
10261537129bSmckusick 			cacheable = FALSE;
10271537129bSmckusick 		goto validate;
10281537129bSmckusick 	}
10291537129bSmckusick 
10301537129bSmckusick 	/*
10311537129bSmckusick 	 * Mapping has changed, invalidate old range and fall through to
10321537129bSmckusick 	 * handle validating new mapping.
10331537129bSmckusick 	 */
10341537129bSmckusick 	if (opa) {
10351537129bSmckusick #ifdef DEBUG
10361537129bSmckusick 		if (pmapdebug & PDB_ENTER)
10371537129bSmckusick 			printf("enter: removing old mapping %x\n", va);
10381537129bSmckusick #endif
103924eb1037Shibler 		pmap_remove_mapping(pmap, va, pte, PRM_TFLUSH|PRM_CFLUSH);
104024eb1037Shibler #ifdef PMAPSTATS
10411537129bSmckusick 		enter_stats.mchange++;
10421537129bSmckusick #endif
10431537129bSmckusick 	}
10441537129bSmckusick 
10451537129bSmckusick 	/*
10461537129bSmckusick 	 * If this is a new user mapping, increment the wiring count
10471537129bSmckusick 	 * on this PT page.  PT pages are wired down as long as there
10481537129bSmckusick 	 * is a valid mapping in the page.
10491537129bSmckusick 	 */
10501537129bSmckusick 	if (pmap != kernel_pmap)
105181dd55b1Shibler 		(void) vm_map_pageable(pt_map, trunc_page(pte),
10521537129bSmckusick 				       round_page(pte+1), FALSE);
10531537129bSmckusick 
10541537129bSmckusick 	/*
10551537129bSmckusick 	 * Enter on the PV list if part of our managed memory
10561537129bSmckusick 	 * Note that we raise IPL while manipulating pv_table
10571537129bSmckusick 	 * since pmap_enter can be called at interrupt time.
10581537129bSmckusick 	 */
10591537129bSmckusick 	if (pa >= vm_first_phys && pa < vm_last_phys) {
10601537129bSmckusick 		register pv_entry_t pv, npv;
10611537129bSmckusick 		int s;
10621537129bSmckusick 
106324eb1037Shibler #ifdef PMAPSTATS
10641537129bSmckusick 		enter_stats.managed++;
10651537129bSmckusick #endif
10661537129bSmckusick 		pv = pa_to_pvh(pa);
10671537129bSmckusick 		s = splimp();
10681537129bSmckusick #ifdef DEBUG
10691537129bSmckusick 		if (pmapdebug & PDB_ENTER)
10701537129bSmckusick 			printf("enter: pv at %x: %x/%x/%x\n",
10711537129bSmckusick 			       pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
10721537129bSmckusick #endif
10731537129bSmckusick 		/*
10741537129bSmckusick 		 * No entries yet, use header as the first entry
10751537129bSmckusick 		 */
1076d45eccefSkarels 		if (pv->pv_pmap == NULL) {
107724eb1037Shibler #ifdef PMAPSTATS
10781537129bSmckusick 			enter_stats.firstpv++;
10791537129bSmckusick #endif
10801537129bSmckusick 			pv->pv_va = va;
10811537129bSmckusick 			pv->pv_pmap = pmap;
1082d45eccefSkarels 			pv->pv_next = NULL;
1083d45eccefSkarels 			pv->pv_ptste = NULL;
1084d45eccefSkarels 			pv->pv_ptpmap = NULL;
10851537129bSmckusick 			pv->pv_flags = 0;
10861537129bSmckusick 		}
10871537129bSmckusick 		/*
10881537129bSmckusick 		 * There is at least one other VA mapping this page.
10891537129bSmckusick 		 * Place this entry after the header.
10901537129bSmckusick 		 */
10911537129bSmckusick 		else {
10921537129bSmckusick #ifdef DEBUG
10931537129bSmckusick 			for (npv = pv; npv; npv = npv->pv_next)
10941537129bSmckusick 				if (pmap == npv->pv_pmap && va == npv->pv_va)
10951537129bSmckusick 					panic("pmap_enter: already in pv_tab");
10961537129bSmckusick #endif
10971537129bSmckusick 			npv = (pv_entry_t)
10981537129bSmckusick 				malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
10991537129bSmckusick 			npv->pv_va = va;
11001537129bSmckusick 			npv->pv_pmap = pmap;
11011537129bSmckusick 			npv->pv_next = pv->pv_next;
1102d45eccefSkarels 			npv->pv_ptste = NULL;
1103d45eccefSkarels 			npv->pv_ptpmap = NULL;
110424eb1037Shibler 			npv->pv_flags = 0;
11051537129bSmckusick 			pv->pv_next = npv;
110624eb1037Shibler #ifdef PMAPSTATS
11071537129bSmckusick 			if (!npv->pv_next)
11081537129bSmckusick 				enter_stats.secondpv++;
11091537129bSmckusick #endif
111024eb1037Shibler #ifdef HAVEVAC
11111537129bSmckusick 			/*
11121537129bSmckusick 			 * Since there is another logical mapping for the
11131537129bSmckusick 			 * same page we may need to cache-inhibit the
11141537129bSmckusick 			 * descriptors on those CPUs with external VACs.
11151537129bSmckusick 			 * We don't need to CI if:
11161537129bSmckusick 			 *
11171537129bSmckusick 			 * - No two mappings belong to the same user pmaps.
11181537129bSmckusick 			 *   Since the cache is flushed on context switches
11191537129bSmckusick 			 *   there is no problem between user processes.
11201537129bSmckusick 			 *
11211537129bSmckusick 			 * - Mappings within a single pmap are a certain
11221537129bSmckusick 			 *   magic distance apart.  VAs at these appropriate
11231537129bSmckusick 			 *   boundaries map to the same cache entries or
11241537129bSmckusick 			 *   otherwise don't conflict.
11251537129bSmckusick 			 *
11261537129bSmckusick 			 * To keep it simple, we only check for these special
11271537129bSmckusick 			 * cases if there are only two mappings, otherwise we
11281537129bSmckusick 			 * punt and always CI.
11291537129bSmckusick 			 *
11301537129bSmckusick 			 * Note that there are no aliasing problems with the
11311537129bSmckusick 			 * on-chip data-cache when the WA bit is set.
11321537129bSmckusick 			 */
11331537129bSmckusick 			if (pmap_aliasmask) {
11341537129bSmckusick 				if (pv->pv_flags & PV_CI) {
11351537129bSmckusick #ifdef DEBUG
11361537129bSmckusick 					if (pmapdebug & PDB_CACHE)
11371537129bSmckusick 					printf("enter: pa %x already CI'ed\n",
11381537129bSmckusick 					       pa);
11391537129bSmckusick #endif
11401537129bSmckusick 					checkpv = cacheable = FALSE;
11411537129bSmckusick 				} else if (npv->pv_next ||
11421537129bSmckusick 					   ((pmap == pv->pv_pmap ||
11431537129bSmckusick 					     pmap == kernel_pmap ||
11441537129bSmckusick 					     pv->pv_pmap == kernel_pmap) &&
11451537129bSmckusick 					    ((pv->pv_va & pmap_aliasmask) !=
11461537129bSmckusick 					     (va & pmap_aliasmask)))) {
11471537129bSmckusick #ifdef DEBUG
11481537129bSmckusick 					if (pmapdebug & PDB_CACHE)
11491537129bSmckusick 					printf("enter: pa %x CI'ing all\n",
11501537129bSmckusick 					       pa);
11511537129bSmckusick #endif
11521537129bSmckusick 					cacheable = FALSE;
11531537129bSmckusick 					pv->pv_flags |= PV_CI;
115424eb1037Shibler #ifdef PMAPSTATS
11551537129bSmckusick 					enter_stats.ci++;
11561537129bSmckusick #endif
11571537129bSmckusick 				}
11581537129bSmckusick 			}
115924eb1037Shibler #endif
11601537129bSmckusick 		}
11611537129bSmckusick 		splx(s);
11621537129bSmckusick 	}
11631537129bSmckusick 	/*
11641537129bSmckusick 	 * Assumption: if it is not part of our managed memory
11651537129bSmckusick 	 * then it must be device memory which may be volitile.
11661537129bSmckusick 	 */
11671537129bSmckusick 	else if (pmap_initialized) {
11681537129bSmckusick 		checkpv = cacheable = FALSE;
116924eb1037Shibler #ifdef PMAPSTATS
11701537129bSmckusick 		enter_stats.unmanaged++;
11711537129bSmckusick #endif
11721537129bSmckusick 	}
11731537129bSmckusick 
11741537129bSmckusick 	/*
11751537129bSmckusick 	 * Increment counters
11761537129bSmckusick 	 */
11771537129bSmckusick 	pmap->pm_stats.resident_count++;
11781537129bSmckusick 	if (wired)
11791537129bSmckusick 		pmap->pm_stats.wired_count++;
11801537129bSmckusick 
11811537129bSmckusick validate:
118224eb1037Shibler #ifdef HAVEVAC
11831537129bSmckusick 	/*
118414b153a2Shibler 	 * Purge kernel side of VAC to ensure we get correct state
118514b153a2Shibler 	 * of HW bits so we don't clobber them.
11861537129bSmckusick 	 */
11871537129bSmckusick 	if (pmap_aliasmask)
11881537129bSmckusick 		DCIS();
118924eb1037Shibler #endif
11901537129bSmckusick 	/*
119124eb1037Shibler 	 * Build the new PTE.
11921537129bSmckusick 	 */
119324eb1037Shibler 	npte = pa | pte_prot(pmap, prot) | (*(int *)pte & (PG_M|PG_U)) | PG_V;
11941537129bSmckusick 	if (wired)
11951537129bSmckusick 		npte |= PG_W;
11961537129bSmckusick 	if (!checkpv && !cacheable)
11971537129bSmckusick 		npte |= PG_CI;
119814b153a2Shibler #if defined(HP380)
119914b153a2Shibler 	if (mmutype == MMU_68040 && (npte & (PG_PROT|PG_CI)) == PG_RW)
120014b153a2Shibler #ifdef DEBUG
120114b153a2Shibler 		if (dowriteback && (dokwriteback || pmap != kernel_pmap))
120214b153a2Shibler #endif
120314b153a2Shibler 		npte |= PG_CCB;
120414b153a2Shibler #endif
12051537129bSmckusick #ifdef DEBUG
12061537129bSmckusick 	if (pmapdebug & PDB_ENTER)
12071537129bSmckusick 		printf("enter: new pte value %x\n", npte);
12081537129bSmckusick #endif
120924eb1037Shibler 	/*
121024eb1037Shibler 	 * Remember if this was a wiring-only change.
121124eb1037Shibler 	 * If so, we need not flush the TLB and caches.
121224eb1037Shibler 	 */
121324eb1037Shibler 	wired = ((*(int *)pte ^ npte) == PG_W);
121414b153a2Shibler #if defined(HP380)
121524eb1037Shibler 	if (mmutype == MMU_68040 && !wired) {
121614b153a2Shibler 		DCFP(pa);
121714b153a2Shibler 		ICPP(pa);
121814b153a2Shibler 	}
121914b153a2Shibler #endif
122014b153a2Shibler 	*(int *)pte = npte;
122124eb1037Shibler 	if (!wired && active_pmap(pmap))
122214b153a2Shibler 		TBIS(va);
122324eb1037Shibler #ifdef HAVEVAC
12241537129bSmckusick 	/*
12251537129bSmckusick 	 * The following is executed if we are entering a second
12261537129bSmckusick 	 * (or greater) mapping for a physical page and the mappings
12271537129bSmckusick 	 * may create an aliasing problem.  In this case we must
12281537129bSmckusick 	 * cache inhibit the descriptors involved and flush any
12291537129bSmckusick 	 * external VAC.
12301537129bSmckusick 	 */
12311537129bSmckusick 	if (checkpv && !cacheable) {
12321537129bSmckusick 		pmap_changebit(pa, PG_CI, TRUE);
12331537129bSmckusick 		DCIA();
123424eb1037Shibler #ifdef PMAPSTATS
12351537129bSmckusick 		enter_stats.flushes++;
12361537129bSmckusick #endif
12371537129bSmckusick #ifdef DEBUG
12381537129bSmckusick 		if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
12391537129bSmckusick 		    (PDB_CACHE|PDB_PVDUMP))
12401537129bSmckusick 			pmap_pvdump(pa);
12411537129bSmckusick #endif
12421537129bSmckusick 	}
12431537129bSmckusick #ifdef DEBUG
12441537129bSmckusick 	else if (pmapvacflush & PVF_ENTER) {
12451537129bSmckusick 		if (pmapvacflush & PVF_TOTAL)
12461537129bSmckusick 			DCIA();
12471537129bSmckusick 		else if (pmap == kernel_pmap)
12481537129bSmckusick 			DCIS();
12491537129bSmckusick 		else
12501537129bSmckusick 			DCIU();
12511537129bSmckusick 	}
125214b153a2Shibler #endif
125324eb1037Shibler #endif
125424eb1037Shibler #ifdef DEBUG
125524eb1037Shibler 	if ((pmapdebug & PDB_WIRING) && pmap != kernel_pmap)
12561537129bSmckusick 		pmap_check_wiring("enter", trunc_page(pmap_pte(pmap, va)));
12571537129bSmckusick #endif
12581537129bSmckusick }
12591537129bSmckusick 
12601537129bSmckusick /*
12611537129bSmckusick  *	Routine:	pmap_change_wiring
12621537129bSmckusick  *	Function:	Change the wiring attribute for a map/virtual-address
12631537129bSmckusick  *			pair.
12641537129bSmckusick  *	In/out conditions:
12651537129bSmckusick  *			The mapping must already exist in the pmap.
12661537129bSmckusick  */
12671537129bSmckusick void
pmap_change_wiring(pmap,va,wired)12681537129bSmckusick pmap_change_wiring(pmap, va, wired)
12691537129bSmckusick 	register pmap_t	pmap;
12701537129bSmckusick 	vm_offset_t	va;
12711537129bSmckusick 	boolean_t	wired;
12721537129bSmckusick {
12731537129bSmckusick 	register pt_entry_t *pte;
12741537129bSmckusick 
12751537129bSmckusick #ifdef DEBUG
12761537129bSmckusick 	if (pmapdebug & PDB_FOLLOW)
12771537129bSmckusick 		printf("pmap_change_wiring(%x, %x, %x)\n", pmap, va, wired);
12781537129bSmckusick #endif
1279d45eccefSkarels 	if (pmap == NULL)
12801537129bSmckusick 		return;
12811537129bSmckusick 
12821537129bSmckusick 	pte = pmap_pte(pmap, va);
12831537129bSmckusick #ifdef DEBUG
12841537129bSmckusick 	/*
12851537129bSmckusick 	 * Page table page is not allocated.
12861537129bSmckusick 	 * Should this ever happen?  Ignore it for now,
12871537129bSmckusick 	 * we don't want to force allocation of unnecessary PTE pages.
12881537129bSmckusick 	 */
128914b153a2Shibler 	if (!pmap_ste_v(pmap, va)) {
12901537129bSmckusick 		if (pmapdebug & PDB_PARANOIA)
12911537129bSmckusick 			printf("pmap_change_wiring: invalid STE for %x\n", va);
12921537129bSmckusick 		return;
12931537129bSmckusick 	}
12941537129bSmckusick 	/*
12951537129bSmckusick 	 * Page not valid.  Should this ever happen?
12961537129bSmckusick 	 * Just continue and change wiring anyway.
12971537129bSmckusick 	 */
12981537129bSmckusick 	if (!pmap_pte_v(pte)) {
12991537129bSmckusick 		if (pmapdebug & PDB_PARANOIA)
13001537129bSmckusick 			printf("pmap_change_wiring: invalid PTE for %x\n", va);
13011537129bSmckusick 	}
13021537129bSmckusick #endif
130324eb1037Shibler 	/*
130424eb1037Shibler 	 * If wiring actually changed (always?) set the wire bit and
130524eb1037Shibler 	 * update the wire count.  Note that wiring is not a hardware
130624eb1037Shibler 	 * characteristic so there is no need to invalidate the TLB.
130724eb1037Shibler 	 */
130824eb1037Shibler 	if (pmap_pte_w_chg(pte, wired ? PG_W : 0)) {
130924eb1037Shibler 		pmap_pte_set_w(pte, wired);
13101537129bSmckusick 		if (wired)
13111537129bSmckusick 			pmap->pm_stats.wired_count++;
13121537129bSmckusick 		else
13131537129bSmckusick 			pmap->pm_stats.wired_count--;
13141537129bSmckusick 	}
131514b153a2Shibler }
13161537129bSmckusick 
13171537129bSmckusick /*
13181537129bSmckusick  *	Routine:	pmap_extract
13191537129bSmckusick  *	Function:
13201537129bSmckusick  *		Extract the physical page address associated
13211537129bSmckusick  *		with the given map/virtual_address pair.
13221537129bSmckusick  */
13231537129bSmckusick 
13241537129bSmckusick vm_offset_t
pmap_extract(pmap,va)13251537129bSmckusick pmap_extract(pmap, va)
13261537129bSmckusick 	register pmap_t	pmap;
13271537129bSmckusick 	vm_offset_t va;
13281537129bSmckusick {
13291537129bSmckusick 	register vm_offset_t pa;
13301537129bSmckusick 
13311537129bSmckusick #ifdef DEBUG
13321537129bSmckusick 	if (pmapdebug & PDB_FOLLOW)
13331537129bSmckusick 		printf("pmap_extract(%x, %x) -> ", pmap, va);
13341537129bSmckusick #endif
13351537129bSmckusick 	pa = 0;
133614b153a2Shibler 	if (pmap && pmap_ste_v(pmap, va))
13371537129bSmckusick 		pa = *(int *)pmap_pte(pmap, va);
13381537129bSmckusick 	if (pa)
13391537129bSmckusick 		pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
13401537129bSmckusick #ifdef DEBUG
13411537129bSmckusick 	if (pmapdebug & PDB_FOLLOW)
13421537129bSmckusick 		printf("%x\n", pa);
13431537129bSmckusick #endif
13441537129bSmckusick 	return(pa);
13451537129bSmckusick }
13461537129bSmckusick 
13471537129bSmckusick /*
13481537129bSmckusick  *	Copy the range specified by src_addr/len
13491537129bSmckusick  *	from the source map to the range dst_addr/len
13501537129bSmckusick  *	in the destination map.
13511537129bSmckusick  *
13521537129bSmckusick  *	This routine is only advisory and need not do anything.
13531537129bSmckusick  */
pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr)13541537129bSmckusick void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
13551537129bSmckusick 	pmap_t		dst_pmap;
13561537129bSmckusick 	pmap_t		src_pmap;
13571537129bSmckusick 	vm_offset_t	dst_addr;
13581537129bSmckusick 	vm_size_t	len;
13591537129bSmckusick 	vm_offset_t	src_addr;
13601537129bSmckusick {
13611537129bSmckusick #ifdef DEBUG
13621537129bSmckusick 	if (pmapdebug & PDB_FOLLOW)
13631537129bSmckusick 		printf("pmap_copy(%x, %x, %x, %x, %x)\n",
13641537129bSmckusick 		       dst_pmap, src_pmap, dst_addr, len, src_addr);
13651537129bSmckusick #endif
13661537129bSmckusick }
13671537129bSmckusick 
13681537129bSmckusick /*
13691537129bSmckusick  *	Require that all active physical maps contain no
13701537129bSmckusick  *	incorrect entries NOW.  [This update includes
13711537129bSmckusick  *	forcing updates of any address map caching.]
13721537129bSmckusick  *
13731537129bSmckusick  *	Generally used to insure that a thread about
13741537129bSmckusick  *	to run will see a semantically correct world.
13751537129bSmckusick  */
pmap_update()13761537129bSmckusick void pmap_update()
13771537129bSmckusick {
13781537129bSmckusick #ifdef DEBUG
13791537129bSmckusick 	if (pmapdebug & PDB_FOLLOW)
13801537129bSmckusick 		printf("pmap_update()\n");
13811537129bSmckusick #endif
13821537129bSmckusick 	TBIA();
13831537129bSmckusick }
13841537129bSmckusick 
13851537129bSmckusick /*
13861537129bSmckusick  *	Routine:	pmap_collect
13871537129bSmckusick  *	Function:
13881537129bSmckusick  *		Garbage collects the physical map system for
13891537129bSmckusick  *		pages which are no longer used.
13901537129bSmckusick  *		Success need not be guaranteed -- that is, there
13911537129bSmckusick  *		may well be pages which are not referenced, but
13921537129bSmckusick  *		others may be collected.
13931537129bSmckusick  *	Usage:
13941537129bSmckusick  *		Called by the pageout daemon when pages are scarce.
13951537129bSmckusick  */
13961537129bSmckusick void
pmap_collect(pmap)13971537129bSmckusick pmap_collect(pmap)
13981537129bSmckusick 	pmap_t		pmap;
13991537129bSmckusick {
14001537129bSmckusick 	register vm_offset_t pa;
14011537129bSmckusick 	register pv_entry_t pv;
14021537129bSmckusick 	register int *pte;
14031537129bSmckusick 	vm_offset_t kpa;
14041537129bSmckusick 	int s;
14051537129bSmckusick 
14061537129bSmckusick #ifdef DEBUG
14071537129bSmckusick 	int *ste;
14081537129bSmckusick 	int opmapdebug;
14091537129bSmckusick #endif
14101537129bSmckusick 	if (pmap != kernel_pmap)
14111537129bSmckusick 		return;
14121537129bSmckusick 
14131537129bSmckusick #ifdef DEBUG
14141537129bSmckusick 	if (pmapdebug & PDB_FOLLOW)
14151537129bSmckusick 		printf("pmap_collect(%x)\n", pmap);
141624eb1037Shibler #endif
141724eb1037Shibler #ifdef PMAPSTATS
14181537129bSmckusick 	kpt_stats.collectscans++;
14191537129bSmckusick #endif
14201537129bSmckusick 	s = splimp();
14211537129bSmckusick 	for (pa = vm_first_phys; pa < vm_last_phys; pa += PAGE_SIZE) {
14221537129bSmckusick 		register struct kpt_page *kpt, **pkpt;
14231537129bSmckusick 
14241537129bSmckusick 		/*
14251537129bSmckusick 		 * Locate physical pages which are being used as kernel
14261537129bSmckusick 		 * page table pages.
14271537129bSmckusick 		 */
14281537129bSmckusick 		pv = pa_to_pvh(pa);
14291537129bSmckusick 		if (pv->pv_pmap != kernel_pmap || !(pv->pv_flags & PV_PTPAGE))
14301537129bSmckusick 			continue;
14311537129bSmckusick 		do {
14321537129bSmckusick 			if (pv->pv_ptste && pv->pv_ptpmap == kernel_pmap)
14331537129bSmckusick 				break;
14341537129bSmckusick 		} while (pv = pv->pv_next);
1435d45eccefSkarels 		if (pv == NULL)
14361537129bSmckusick 			continue;
14371537129bSmckusick #ifdef DEBUG
14381537129bSmckusick 		if (pv->pv_va < (vm_offset_t)Sysmap ||
14391537129bSmckusick 		    pv->pv_va >= (vm_offset_t)Sysmap + HP_MAX_PTSIZE)
14401537129bSmckusick 			printf("collect: kernel PT VA out of range\n");
14411537129bSmckusick 		else
14421537129bSmckusick 			goto ok;
14431537129bSmckusick 		pmap_pvdump(pa);
14441537129bSmckusick 		continue;
14451537129bSmckusick ok:
14461537129bSmckusick #endif
14471537129bSmckusick 		pte = (int *)(pv->pv_va + HP_PAGE_SIZE);
14481537129bSmckusick 		while (--pte >= (int *)pv->pv_va && *pte == PG_NV)
14491537129bSmckusick 			;
14501537129bSmckusick 		if (pte >= (int *)pv->pv_va)
14511537129bSmckusick 			continue;
14521537129bSmckusick 
14531537129bSmckusick #ifdef DEBUG
14541537129bSmckusick 		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
14551537129bSmckusick 			printf("collect: freeing KPT page at %x (ste %x@%x)\n",
14561537129bSmckusick 			       pv->pv_va, *(int *)pv->pv_ptste, pv->pv_ptste);
14571537129bSmckusick 			opmapdebug = pmapdebug;
14581537129bSmckusick 			pmapdebug |= PDB_PTPAGE;
14591537129bSmckusick 		}
14601537129bSmckusick 
14611537129bSmckusick 		ste = (int *)pv->pv_ptste;
14621537129bSmckusick #endif
14631537129bSmckusick 		/*
14641537129bSmckusick 		 * If all entries were invalid we can remove the page.
146524eb1037Shibler 		 * We call pmap_remove_entry to take care of invalidating
146624eb1037Shibler 		 * ST and Sysptmap entries.
14671537129bSmckusick 		 */
14681537129bSmckusick 		kpa = pmap_extract(pmap, pv->pv_va);
146924eb1037Shibler 		pmap_remove_mapping(pmap, pv->pv_va, PT_ENTRY_NULL,
147024eb1037Shibler 				    PRM_TFLUSH|PRM_CFLUSH);
14711537129bSmckusick 		/*
14721537129bSmckusick 		 * Use the physical address to locate the original
14731537129bSmckusick 		 * (kmem_alloc assigned) address for the page and put
14741537129bSmckusick 		 * that page back on the free list.
14751537129bSmckusick 		 */
14761537129bSmckusick 		for (pkpt = &kpt_used_list, kpt = *pkpt;
14771537129bSmckusick 		     kpt != (struct kpt_page *)0;
14781537129bSmckusick 		     pkpt = &kpt->kpt_next, kpt = *pkpt)
14791537129bSmckusick 			if (kpt->kpt_pa == kpa)
14801537129bSmckusick 				break;
14811537129bSmckusick #ifdef DEBUG
14821537129bSmckusick 		if (kpt == (struct kpt_page *)0)
14831537129bSmckusick 			panic("pmap_collect: lost a KPT page");
14841537129bSmckusick 		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
14851537129bSmckusick 			printf("collect: %x (%x) to free list\n",
14861537129bSmckusick 			       kpt->kpt_va, kpa);
14871537129bSmckusick #endif
14881537129bSmckusick 		*pkpt = kpt->kpt_next;
14891537129bSmckusick 		kpt->kpt_next = kpt_free_list;
14901537129bSmckusick 		kpt_free_list = kpt;
149124eb1037Shibler #ifdef PMAPSTATS
14921537129bSmckusick 		kpt_stats.kptinuse--;
14931537129bSmckusick 		kpt_stats.collectpages++;
149424eb1037Shibler #endif
149524eb1037Shibler #ifdef DEBUG
14961537129bSmckusick 		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
14971537129bSmckusick 			pmapdebug = opmapdebug;
14981537129bSmckusick 
14991537129bSmckusick 		if (*ste)
15001537129bSmckusick 			printf("collect: kernel STE at %x still valid (%x)\n",
15011537129bSmckusick 			       ste, *ste);
15021537129bSmckusick 		ste = (int *)&Sysptmap[(st_entry_t *)ste-pmap_ste(kernel_pmap, 0)];
15031537129bSmckusick 		if (*ste)
15041537129bSmckusick 			printf("collect: kernel PTmap at %x still valid (%x)\n",
15051537129bSmckusick 			       ste, *ste);
15061537129bSmckusick #endif
15071537129bSmckusick 	}
15081537129bSmckusick 	splx(s);
15091537129bSmckusick }
15101537129bSmckusick 
15111537129bSmckusick void
pmap_activate(pmap,pcbp)15121537129bSmckusick pmap_activate(pmap, pcbp)
15131537129bSmckusick 	register pmap_t pmap;
15141537129bSmckusick 	struct pcb *pcbp;
15151537129bSmckusick {
15161537129bSmckusick #ifdef DEBUG
15171537129bSmckusick 	if (pmapdebug & (PDB_FOLLOW|PDB_SEGTAB))
15181537129bSmckusick 		printf("pmap_activate(%x, %x)\n", pmap, pcbp);
15191537129bSmckusick #endif
1520d45eccefSkarels 	PMAP_ACTIVATE(pmap, pcbp, pmap == curproc->p_vmspace->vm_map.pmap);
15211537129bSmckusick }
15221537129bSmckusick 
15231537129bSmckusick /*
15241537129bSmckusick  *	pmap_zero_page zeros the specified (machine independent)
15251537129bSmckusick  *	page by mapping the page into virtual memory and using
15261537129bSmckusick  *	bzero to clear its contents, one machine dependent page
15271537129bSmckusick  *	at a time.
152814b153a2Shibler  *
152914b153a2Shibler  *	XXX this is a bad implementation for virtual cache machines
153014b153a2Shibler  *	(320/350) because pmap_enter doesn't cache-inhibit the temporary
153114b153a2Shibler  *	kernel mapping and we wind up with data cached for that KVA.
153214b153a2Shibler  *	It is probably a win for physical cache machines (370/380)
153314b153a2Shibler  *	as the cache loading is not wasted.
15341537129bSmckusick  */
153550d5606dSmarc void
pmap_zero_page(phys)15361537129bSmckusick pmap_zero_page(phys)
153714b153a2Shibler 	vm_offset_t phys;
15381537129bSmckusick {
153914b153a2Shibler 	register vm_offset_t kva;
154014b153a2Shibler 	extern caddr_t CADDR1;
15411537129bSmckusick 
15421537129bSmckusick #ifdef DEBUG
15431537129bSmckusick 	if (pmapdebug & PDB_FOLLOW)
15441537129bSmckusick 		printf("pmap_zero_page(%x)\n", phys);
15451537129bSmckusick #endif
154614b153a2Shibler 	kva = (vm_offset_t) CADDR1;
154714b153a2Shibler 	pmap_enter(kernel_pmap, kva, phys, VM_PROT_READ|VM_PROT_WRITE, TRUE);
154814b153a2Shibler 	bzero((caddr_t)kva, HP_PAGE_SIZE);
154924eb1037Shibler 	pmap_remove_mapping(kernel_pmap, kva, PT_ENTRY_NULL,
155024eb1037Shibler 			    PRM_TFLUSH|PRM_CFLUSH);
155114b153a2Shibler }
15521537129bSmckusick 
15531537129bSmckusick /*
15541537129bSmckusick  *	pmap_copy_page copies the specified (machine independent)
15551537129bSmckusick  *	page by mapping the page into virtual memory and using
15561537129bSmckusick  *	bcopy to copy the page, one machine dependent page at a
15571537129bSmckusick  *	time.
155814b153a2Shibler  *
155914b153a2Shibler  *
156014b153a2Shibler  *	XXX this is a bad implementation for virtual cache machines
156114b153a2Shibler  *	(320/350) because pmap_enter doesn't cache-inhibit the temporary
156214b153a2Shibler  *	kernel mapping and we wind up with data cached for that KVA.
156314b153a2Shibler  *	It is probably a win for physical cache machines (370/380)
156414b153a2Shibler  *	as the cache loading is not wasted.
15651537129bSmckusick  */
156650d5606dSmarc void
pmap_copy_page(src,dst)15671537129bSmckusick pmap_copy_page(src, dst)
156814b153a2Shibler 	vm_offset_t src, dst;
15691537129bSmckusick {
157014b153a2Shibler 	register vm_offset_t skva, dkva;
157114b153a2Shibler 	extern caddr_t CADDR1, CADDR2;
15721537129bSmckusick 
15731537129bSmckusick #ifdef DEBUG
15741537129bSmckusick 	if (pmapdebug & PDB_FOLLOW)
15751537129bSmckusick 		printf("pmap_copy_page(%x, %x)\n", src, dst);
15761537129bSmckusick #endif
157714b153a2Shibler 	skva = (vm_offset_t) CADDR1;
157814b153a2Shibler 	dkva = (vm_offset_t) CADDR2;
157914b153a2Shibler 	pmap_enter(kernel_pmap, skva, src, VM_PROT_READ, TRUE);
158014b153a2Shibler 	pmap_enter(kernel_pmap, dkva, dst, VM_PROT_READ|VM_PROT_WRITE, TRUE);
15817c8de635Shibler 	copypage((caddr_t)skva, (caddr_t)dkva);
158214b153a2Shibler 	/* CADDR1 and CADDR2 are virtually contiguous */
158314b153a2Shibler 	pmap_remove(kernel_pmap, skva, skva+2*PAGE_SIZE);
158414b153a2Shibler }
15851537129bSmckusick 
15861537129bSmckusick /*
15871537129bSmckusick  *	Routine:	pmap_pageable
15881537129bSmckusick  *	Function:
15891537129bSmckusick  *		Make the specified pages (by pmap, offset)
15901537129bSmckusick  *		pageable (or not) as requested.
15911537129bSmckusick  *
15921537129bSmckusick  *		A page which is not pageable may not take
15931537129bSmckusick  *		a fault; therefore, its page table entry
15941537129bSmckusick  *		must remain valid for the duration.
15951537129bSmckusick  *
15961537129bSmckusick  *		This routine is merely advisory; pmap_enter
15971537129bSmckusick  *		will specify that these pages are to be wired
15981537129bSmckusick  *		down (or not) as appropriate.
15991537129bSmckusick  */
160050d5606dSmarc void
pmap_pageable(pmap,sva,eva,pageable)16011537129bSmckusick pmap_pageable(pmap, sva, eva, pageable)
16021537129bSmckusick 	pmap_t		pmap;
16031537129bSmckusick 	vm_offset_t	sva, eva;
16041537129bSmckusick 	boolean_t	pageable;
16051537129bSmckusick {
16061537129bSmckusick #ifdef DEBUG
16071537129bSmckusick 	if (pmapdebug & PDB_FOLLOW)
16081537129bSmckusick 		printf("pmap_pageable(%x, %x, %x, %x)\n",
16091537129bSmckusick 		       pmap, sva, eva, pageable);
16101537129bSmckusick #endif
16111537129bSmckusick 	/*
16121537129bSmckusick 	 * If we are making a PT page pageable then all valid
16131537129bSmckusick 	 * mappings must be gone from that page.  Hence it should
16141537129bSmckusick 	 * be all zeros and there is no need to clean it.
16151537129bSmckusick 	 * Assumptions:
16161537129bSmckusick 	 *	- we are called with only one page at a time
16171537129bSmckusick 	 *	- PT pages have only one pv_table entry
16181537129bSmckusick 	 */
16191537129bSmckusick 	if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
16201537129bSmckusick 		register pv_entry_t pv;
16211537129bSmckusick 		register vm_offset_t pa;
16221537129bSmckusick 
16231537129bSmckusick #ifdef DEBUG
16241537129bSmckusick 		if ((pmapdebug & (PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE)
16251537129bSmckusick 			printf("pmap_pageable(%x, %x, %x, %x)\n",
16261537129bSmckusick 			       pmap, sva, eva, pageable);
16271537129bSmckusick #endif
162814b153a2Shibler 		if (!pmap_ste_v(pmap, sva))
16291537129bSmckusick 			return;
16301537129bSmckusick 		pa = pmap_pte_pa(pmap_pte(pmap, sva));
16311537129bSmckusick 		if (pa < vm_first_phys || pa >= vm_last_phys)
16321537129bSmckusick 			return;
16331537129bSmckusick 		pv = pa_to_pvh(pa);
1634d45eccefSkarels 		if (pv->pv_ptste == NULL)
16351537129bSmckusick 			return;
16361537129bSmckusick #ifdef DEBUG
16371537129bSmckusick 		if (pv->pv_va != sva || pv->pv_next) {
16381537129bSmckusick 			printf("pmap_pageable: bad PT page va %x next %x\n",
16391537129bSmckusick 			       pv->pv_va, pv->pv_next);
16401537129bSmckusick 			return;
16411537129bSmckusick 		}
16421537129bSmckusick #endif
16431537129bSmckusick 		/*
16441537129bSmckusick 		 * Mark it unmodified to avoid pageout
16451537129bSmckusick 		 */
1646d45eccefSkarels 		pmap_changebit(pa, PG_M, FALSE);
16471537129bSmckusick #ifdef DEBUG
16481d2d1332Shibler 		if ((PHYS_TO_VM_PAGE(pa)->flags & PG_CLEAN) == 0) {
16491d2d1332Shibler 			printf("pa %x: flags=%x: not clean\n",
16501d2d1332Shibler 			       pa, PHYS_TO_VM_PAGE(pa)->flags);
16511d2d1332Shibler 			PHYS_TO_VM_PAGE(pa)->flags |= PG_CLEAN;
16521d2d1332Shibler 		}
16531537129bSmckusick 		if (pmapdebug & PDB_PTPAGE)
16541537129bSmckusick 			printf("pmap_pageable: PT page %x(%x) unmodified\n",
16551537129bSmckusick 			       sva, *(int *)pmap_pte(pmap, sva));
16561537129bSmckusick 		if (pmapdebug & PDB_WIRING)
16571537129bSmckusick 			pmap_check_wiring("pageable", sva);
16581537129bSmckusick #endif
16591537129bSmckusick 	}
16601537129bSmckusick }
16611537129bSmckusick 
16621537129bSmckusick /*
16631537129bSmckusick  *	Clear the modify bits on the specified physical page.
16641537129bSmckusick  */
16651537129bSmckusick 
16661537129bSmckusick void
pmap_clear_modify(pa)16671537129bSmckusick pmap_clear_modify(pa)
16681537129bSmckusick 	vm_offset_t	pa;
16691537129bSmckusick {
16701537129bSmckusick #ifdef DEBUG
16711537129bSmckusick 	if (pmapdebug & PDB_FOLLOW)
16721537129bSmckusick 		printf("pmap_clear_modify(%x)\n", pa);
16731537129bSmckusick #endif
16741537129bSmckusick 	pmap_changebit(pa, PG_M, FALSE);
16751537129bSmckusick }
16761537129bSmckusick 
16771537129bSmckusick /*
16781537129bSmckusick  *	pmap_clear_reference:
16791537129bSmckusick  *
16801537129bSmckusick  *	Clear the reference bit on the specified physical page.
16811537129bSmckusick  */
16821537129bSmckusick 
pmap_clear_reference(pa)16831537129bSmckusick void pmap_clear_reference(pa)
16841537129bSmckusick 	vm_offset_t	pa;
16851537129bSmckusick {
16861537129bSmckusick #ifdef DEBUG
16871537129bSmckusick 	if (pmapdebug & PDB_FOLLOW)
16881537129bSmckusick 		printf("pmap_clear_reference(%x)\n", pa);
16891537129bSmckusick #endif
16901537129bSmckusick 	pmap_changebit(pa, PG_U, FALSE);
16911537129bSmckusick }
16921537129bSmckusick 
16931537129bSmckusick /*
16941537129bSmckusick  *	pmap_is_referenced:
16951537129bSmckusick  *
16961537129bSmckusick  *	Return whether or not the specified physical page is referenced
16971537129bSmckusick  *	by any physical maps.
16981537129bSmckusick  */
16991537129bSmckusick 
17001537129bSmckusick boolean_t
pmap_is_referenced(pa)17011537129bSmckusick pmap_is_referenced(pa)
17021537129bSmckusick 	vm_offset_t	pa;
17031537129bSmckusick {
17041537129bSmckusick #ifdef DEBUG
17051537129bSmckusick 	if (pmapdebug & PDB_FOLLOW) {
17061537129bSmckusick 		boolean_t rv = pmap_testbit(pa, PG_U);
17071537129bSmckusick 		printf("pmap_is_referenced(%x) -> %c\n", pa, "FT"[rv]);
17081537129bSmckusick 		return(rv);
17091537129bSmckusick 	}
17101537129bSmckusick #endif
17111537129bSmckusick 	return(pmap_testbit(pa, PG_U));
17121537129bSmckusick }
17131537129bSmckusick 
17141537129bSmckusick /*
17151537129bSmckusick  *	pmap_is_modified:
17161537129bSmckusick  *
17171537129bSmckusick  *	Return whether or not the specified physical page is modified
17181537129bSmckusick  *	by any physical maps.
17191537129bSmckusick  */
17201537129bSmckusick 
17211537129bSmckusick boolean_t
pmap_is_modified(pa)17221537129bSmckusick pmap_is_modified(pa)
17231537129bSmckusick 	vm_offset_t	pa;
17241537129bSmckusick {
17251537129bSmckusick #ifdef DEBUG
17261537129bSmckusick 	if (pmapdebug & PDB_FOLLOW) {
17271537129bSmckusick 		boolean_t rv = pmap_testbit(pa, PG_M);
17281537129bSmckusick 		printf("pmap_is_modified(%x) -> %c\n", pa, "FT"[rv]);
17291537129bSmckusick 		return(rv);
17301537129bSmckusick 	}
17311537129bSmckusick #endif
17321537129bSmckusick 	return(pmap_testbit(pa, PG_M));
17331537129bSmckusick }
17341537129bSmckusick 
17351537129bSmckusick vm_offset_t
pmap_phys_address(ppn)17361537129bSmckusick pmap_phys_address(ppn)
17371537129bSmckusick 	int ppn;
17381537129bSmckusick {
17391537129bSmckusick 	return(hp300_ptob(ppn));
17401537129bSmckusick }
17411537129bSmckusick 
174224eb1037Shibler #ifdef HPUXCOMPAT
174324eb1037Shibler /*
174424eb1037Shibler  * 'PUX hack for dealing with the so called multi-mapped address space.
174524eb1037Shibler  * The first 256mb is mapped in at every 256mb region from 0x10000000
174624eb1037Shibler  * up to 0xF0000000.  This allows for 15 bits of tag information.
174724eb1037Shibler  *
174824eb1037Shibler  * We implement this at the segment table level, the machine independent
174924eb1037Shibler  * VM knows nothing about it.
175024eb1037Shibler  */
pmap_mapmulti(pmap,va)175124eb1037Shibler pmap_mapmulti(pmap, va)
175224eb1037Shibler 	pmap_t pmap;
175324eb1037Shibler 	vm_offset_t va;
175424eb1037Shibler {
175524eb1037Shibler 	int *ste, *bste;
175624eb1037Shibler 
175724eb1037Shibler #ifdef DEBUG
175824eb1037Shibler 	if (pmapdebug & PDB_MULTIMAP) {
175924eb1037Shibler 		ste = (int *)pmap_ste(pmap, HPMMBASEADDR(va));
176024eb1037Shibler 		printf("pmap_mapmulti(%x, %x): bste %x(%x)",
176124eb1037Shibler 		       pmap, va, ste, *ste);
176224eb1037Shibler 		ste = (int *)pmap_ste(pmap, va);
176324eb1037Shibler 		printf(" ste %x(%x)\n", ste, *ste);
176424eb1037Shibler 	}
176524eb1037Shibler #endif
176624eb1037Shibler 	bste = (int *) pmap_ste(pmap, HPMMBASEADDR(va));
176724eb1037Shibler 	ste = (int *) pmap_ste(pmap, va);
176824eb1037Shibler 	if (*ste == SG_NV && (*bste & SG_V)) {
176924eb1037Shibler 		*ste = *bste;
177024eb1037Shibler 		TBIAU();
177124eb1037Shibler 		return (KERN_SUCCESS);
177224eb1037Shibler 	}
177324eb1037Shibler 	return (KERN_INVALID_ADDRESS);
177424eb1037Shibler }
177524eb1037Shibler #endif
177624eb1037Shibler 
17771537129bSmckusick /*
17781537129bSmckusick  * Miscellaneous support routines follow
17791537129bSmckusick  */
17801537129bSmckusick 
178124eb1037Shibler /*
178224eb1037Shibler  * Invalidate a single page denoted by pmap/va.
178324eb1037Shibler  * If (pte != NULL), it is the already computed PTE for the page.
178424eb1037Shibler  * If (flags & PRM_TFLUSH), we must invalidate any TLB information.
178524eb1037Shibler  * If (flags & PRM_CFLUSH), we must flush/invalidate any cache information.
178624eb1037Shibler  */
178724eb1037Shibler /* static */
178824eb1037Shibler void
pmap_remove_mapping(pmap,va,pte,flags)178924eb1037Shibler pmap_remove_mapping(pmap, va, pte, flags)
179024eb1037Shibler 	register pmap_t pmap;
179124eb1037Shibler 	register vm_offset_t va;
179224eb1037Shibler 	register pt_entry_t *pte;
179324eb1037Shibler 	int flags;
179424eb1037Shibler {
179524eb1037Shibler 	register vm_offset_t pa;
179624eb1037Shibler 	register pv_entry_t pv, npv;
179724eb1037Shibler 	pmap_t ptpmap;
179824eb1037Shibler 	int *ste, s, bits;
179924eb1037Shibler #ifdef DEBUG
180024eb1037Shibler 	pt_entry_t opte;
180124eb1037Shibler 
180224eb1037Shibler 	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
180324eb1037Shibler 		printf("pmap_remove_mapping(%x, %x, %x, %x)\n",
180424eb1037Shibler 		       pmap, va, pte, flags);
180524eb1037Shibler #endif
180624eb1037Shibler 
180724eb1037Shibler 	/*
180824eb1037Shibler 	 * PTE not provided, compute it from pmap and va.
180924eb1037Shibler 	 */
181024eb1037Shibler 	if (pte == PT_ENTRY_NULL) {
181124eb1037Shibler 		pte = pmap_pte(pmap, va);
181224eb1037Shibler 		if (*(int *)pte == PG_NV)
181324eb1037Shibler 			return;
181424eb1037Shibler 	}
181524eb1037Shibler #ifdef HAVEVAC
181624eb1037Shibler 	if (pmap_aliasmask && (flags & PRM_CFLUSH)) {
181724eb1037Shibler 		/*
181824eb1037Shibler 		 * Purge kernel side of VAC to ensure we get the correct
181924eb1037Shibler 		 * state of any hardware maintained bits.
182024eb1037Shibler 		 */
182124eb1037Shibler 		DCIS();
182224eb1037Shibler #ifdef PMAPSTATS
182324eb1037Shibler 		remove_stats.sflushes++;
182424eb1037Shibler #endif
182524eb1037Shibler 		/*
182624eb1037Shibler 		 * If this is a non-CI user mapping for the current process,
182724eb1037Shibler 		 * flush the VAC.  Note that the kernel side was flushed
182824eb1037Shibler 		 * above so we don't worry about non-CI kernel mappings.
182924eb1037Shibler 		 */
183024eb1037Shibler 		if (pmap == curproc->p_vmspace->vm_map.pmap &&
183124eb1037Shibler 		    !pmap_pte_ci(pte)) {
183224eb1037Shibler 			DCIU();
183324eb1037Shibler #ifdef PMAPSTATS
183424eb1037Shibler 			remove_stats.uflushes++;
183524eb1037Shibler #endif
183624eb1037Shibler 		}
183724eb1037Shibler 	}
183824eb1037Shibler #endif
183924eb1037Shibler 	pa = pmap_pte_pa(pte);
184024eb1037Shibler #ifdef DEBUG
184124eb1037Shibler 	opte = *pte;
184224eb1037Shibler #endif
184324eb1037Shibler #ifdef PMAPSTATS
184424eb1037Shibler 	remove_stats.removes++;
184524eb1037Shibler #endif
184624eb1037Shibler 	/*
184724eb1037Shibler 	 * Update statistics
184824eb1037Shibler 	 */
184924eb1037Shibler 	if (pmap_pte_w(pte))
185024eb1037Shibler 		pmap->pm_stats.wired_count--;
185124eb1037Shibler 	pmap->pm_stats.resident_count--;
185224eb1037Shibler 
185324eb1037Shibler 	/*
185424eb1037Shibler 	 * Invalidate the PTE after saving the reference modify info.
185524eb1037Shibler 	 */
185624eb1037Shibler #ifdef DEBUG
185724eb1037Shibler 	if (pmapdebug & PDB_REMOVE)
185824eb1037Shibler 		printf("remove: invalidating pte at %x\n", pte);
185924eb1037Shibler #endif
186024eb1037Shibler 	bits = *(int *)pte & (PG_U|PG_M);
186124eb1037Shibler 	*(int *)pte = PG_NV;
186224eb1037Shibler 	if ((flags & PRM_TFLUSH) && active_pmap(pmap))
186324eb1037Shibler 		TBIS(va);
186424eb1037Shibler 	/*
186524eb1037Shibler 	 * For user mappings decrement the wiring count on
186624eb1037Shibler 	 * the PT page.  We do this after the PTE has been
186724eb1037Shibler 	 * invalidated because vm_map_pageable winds up in
186824eb1037Shibler 	 * pmap_pageable which clears the modify bit for the
186924eb1037Shibler 	 * PT page.
187024eb1037Shibler 	 */
187124eb1037Shibler 	if (pmap != kernel_pmap) {
1872*736e6f7aSmckusick #if defined(DEBUG) && NCPUS == 1
1873*736e6f7aSmckusick 		/*
1874*736e6f7aSmckusick 		 * XXX this recursive use of the VM won't work on a MP
1875*736e6f7aSmckusick 		 * (or when otherwise debugging simple locks).  We might
1876*736e6f7aSmckusick 		 * be called with the page queue lock held (e.g. from
1877*736e6f7aSmckusick 		 * the pageout daemon) and vm_map_pageable might call
1878*736e6f7aSmckusick 		 * vm_fault_unwire which would try to lock the page queues
1879*736e6f7aSmckusick 		 * again.  For debugging we hack and drop the lock.
1880*736e6f7aSmckusick 		 */
1881*736e6f7aSmckusick 		int hadit = !simple_lock_try(&vm_page_queue_lock);
1882*736e6f7aSmckusick 		simple_unlock(&vm_page_queue_lock);
1883*736e6f7aSmckusick #endif
188481dd55b1Shibler 		(void) vm_map_pageable(pt_map, trunc_page(pte),
188524eb1037Shibler 				       round_page(pte+1), TRUE);
188624eb1037Shibler #ifdef DEBUG
188724eb1037Shibler 		if (pmapdebug & PDB_WIRING)
188824eb1037Shibler 			pmap_check_wiring("remove", trunc_page(pte));
1889*736e6f7aSmckusick #if NCPUS == 1
1890*736e6f7aSmckusick 		if (hadit)
1891*736e6f7aSmckusick 			simple_lock(&vm_page_queue_lock);
1892*736e6f7aSmckusick #endif
189324eb1037Shibler #endif
189424eb1037Shibler 	}
189524eb1037Shibler 	/*
189624eb1037Shibler 	 * If this isn't a managed page, we are all done.
189724eb1037Shibler 	 */
189824eb1037Shibler 	if (pa < vm_first_phys || pa >= vm_last_phys)
189924eb1037Shibler 		return;
190024eb1037Shibler 	/*
190124eb1037Shibler 	 * Otherwise remove it from the PV table
190224eb1037Shibler 	 * (raise IPL since we may be called at interrupt time).
190324eb1037Shibler 	 */
190424eb1037Shibler 	pv = pa_to_pvh(pa);
190524eb1037Shibler 	ste = (int *)0;
190624eb1037Shibler 	s = splimp();
190724eb1037Shibler 	/*
190824eb1037Shibler 	 * If it is the first entry on the list, it is actually
190924eb1037Shibler 	 * in the header and we must copy the following entry up
191024eb1037Shibler 	 * to the header.  Otherwise we must search the list for
191124eb1037Shibler 	 * the entry.  In either case we free the now unused entry.
191224eb1037Shibler 	 */
191324eb1037Shibler 	if (pmap == pv->pv_pmap && va == pv->pv_va) {
191424eb1037Shibler 		ste = (int *)pv->pv_ptste;
191524eb1037Shibler 		ptpmap = pv->pv_ptpmap;
191624eb1037Shibler 		npv = pv->pv_next;
191724eb1037Shibler 		if (npv) {
191824eb1037Shibler 			npv->pv_flags = pv->pv_flags;
191924eb1037Shibler 			*pv = *npv;
192024eb1037Shibler 			free((caddr_t)npv, M_VMPVENT);
192124eb1037Shibler 		} else
192224eb1037Shibler 			pv->pv_pmap = NULL;
192324eb1037Shibler #ifdef PMAPSTATS
192424eb1037Shibler 		remove_stats.pvfirst++;
192524eb1037Shibler #endif
192624eb1037Shibler 	} else {
192724eb1037Shibler 		for (npv = pv->pv_next; npv; npv = npv->pv_next) {
192824eb1037Shibler #ifdef PMAPSTATS
192924eb1037Shibler 			remove_stats.pvsearch++;
193024eb1037Shibler #endif
193124eb1037Shibler 			if (pmap == npv->pv_pmap && va == npv->pv_va)
193224eb1037Shibler 				break;
193324eb1037Shibler 			pv = npv;
193424eb1037Shibler 		}
193524eb1037Shibler #ifdef DEBUG
193624eb1037Shibler 		if (npv == NULL)
193724eb1037Shibler 			panic("pmap_remove: PA not in pv_tab");
193824eb1037Shibler #endif
193924eb1037Shibler 		ste = (int *)npv->pv_ptste;
194024eb1037Shibler 		ptpmap = npv->pv_ptpmap;
194124eb1037Shibler 		pv->pv_next = npv->pv_next;
194224eb1037Shibler 		free((caddr_t)npv, M_VMPVENT);
194324eb1037Shibler 		pv = pa_to_pvh(pa);
194424eb1037Shibler 	}
194524eb1037Shibler #ifdef HAVEVAC
194624eb1037Shibler 	/*
194724eb1037Shibler 	 * If only one mapping left we no longer need to cache inhibit
194824eb1037Shibler 	 */
194924eb1037Shibler 	if (pmap_aliasmask &&
195024eb1037Shibler 	    pv->pv_pmap && pv->pv_next == NULL && (pv->pv_flags & PV_CI)) {
195124eb1037Shibler #ifdef DEBUG
195224eb1037Shibler 		if (pmapdebug & PDB_CACHE)
195324eb1037Shibler 			printf("remove: clearing CI for pa %x\n", pa);
195424eb1037Shibler #endif
195524eb1037Shibler 		pv->pv_flags &= ~PV_CI;
195624eb1037Shibler 		pmap_changebit(pa, PG_CI, FALSE);
195724eb1037Shibler #ifdef DEBUG
195824eb1037Shibler 		if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
195924eb1037Shibler 		    (PDB_CACHE|PDB_PVDUMP))
196024eb1037Shibler 			pmap_pvdump(pa);
196124eb1037Shibler #endif
196224eb1037Shibler 	}
196324eb1037Shibler #endif
196424eb1037Shibler 	/*
196524eb1037Shibler 	 * If this was a PT page we must also remove the
196624eb1037Shibler 	 * mapping from the associated segment table.
196724eb1037Shibler 	 */
196824eb1037Shibler 	if (ste) {
196924eb1037Shibler #ifdef PMAPSTATS
197024eb1037Shibler 		remove_stats.ptinvalid++;
197124eb1037Shibler #endif
197224eb1037Shibler #ifdef DEBUG
197324eb1037Shibler 		if (pmapdebug & (PDB_REMOVE|PDB_PTPAGE))
197424eb1037Shibler 			printf("remove: ste was %x@%x pte was %x@%x\n",
197524eb1037Shibler 			       *ste, ste, *(int *)&opte, pmap_pte(pmap, va));
197624eb1037Shibler #endif
197724eb1037Shibler #if defined(HP380)
197824eb1037Shibler 		if (mmutype == MMU_68040) {
197924eb1037Shibler 			int *este = &ste[NPTEPG/SG4_LEV3SIZE];
198024eb1037Shibler 
198124eb1037Shibler 			while (ste < este)
198224eb1037Shibler 				*ste++ = SG_NV;
198324eb1037Shibler #ifdef DEBUG
198424eb1037Shibler 			ste -= NPTEPG/SG4_LEV3SIZE;
198524eb1037Shibler #endif
198624eb1037Shibler 		} else
198724eb1037Shibler #endif
198824eb1037Shibler 		*ste = SG_NV;
198924eb1037Shibler 		/*
199024eb1037Shibler 		 * If it was a user PT page, we decrement the
199124eb1037Shibler 		 * reference count on the segment table as well,
199224eb1037Shibler 		 * freeing it if it is now empty.
199324eb1037Shibler 		 */
199424eb1037Shibler 		if (ptpmap != kernel_pmap) {
199524eb1037Shibler #ifdef DEBUG
199624eb1037Shibler 			if (pmapdebug & (PDB_REMOVE|PDB_SEGTAB))
199724eb1037Shibler 				printf("remove: stab %x, refcnt %d\n",
199824eb1037Shibler 				       ptpmap->pm_stab, ptpmap->pm_sref - 1);
199924eb1037Shibler 			if ((pmapdebug & PDB_PARANOIA) &&
200024eb1037Shibler 			    ptpmap->pm_stab != (st_entry_t *)trunc_page(ste))
200124eb1037Shibler 				panic("remove: bogus ste");
200224eb1037Shibler #endif
200324eb1037Shibler 			if (--(ptpmap->pm_sref) == 0) {
200424eb1037Shibler #ifdef DEBUG
200524eb1037Shibler 				if (pmapdebug&(PDB_REMOVE|PDB_SEGTAB))
200624eb1037Shibler 					printf("remove: free stab %x\n",
200724eb1037Shibler 					       ptpmap->pm_stab);
200824eb1037Shibler #endif
200922ac56c6Shibler 				kmem_free_wakeup(st_map,
201024eb1037Shibler 						 (vm_offset_t)ptpmap->pm_stab,
201124eb1037Shibler 						 HP_STSIZE);
201224eb1037Shibler 				ptpmap->pm_stab = Segtabzero;
201324eb1037Shibler 				ptpmap->pm_stpa = Segtabzeropa;
201424eb1037Shibler #if defined(HP380)
201524eb1037Shibler 				if (mmutype == MMU_68040)
201624eb1037Shibler 					ptpmap->pm_stfree = protostfree;
201724eb1037Shibler #endif
201824eb1037Shibler 				ptpmap->pm_stchanged = TRUE;
201924eb1037Shibler 				/*
202024eb1037Shibler 				 * XXX may have changed segment table
202124eb1037Shibler 				 * pointer for current process so
202224eb1037Shibler 				 * update now to reload hardware.
202324eb1037Shibler 				 */
202424eb1037Shibler 				if (ptpmap == curproc->p_vmspace->vm_map.pmap)
202524eb1037Shibler 					PMAP_ACTIVATE(ptpmap,
202624eb1037Shibler 					    (struct pcb *)curproc->p_addr, 1);
202724eb1037Shibler 			}
20281d2d1332Shibler #ifdef DEBUG
20291d2d1332Shibler 			else if (ptpmap->pm_sref < 0)
20301d2d1332Shibler 				panic("remove: sref < 0");
20311d2d1332Shibler #endif
203224eb1037Shibler 		}
203324eb1037Shibler #if 0
203424eb1037Shibler 		/*
203524eb1037Shibler 		 * XXX this should be unnecessary as we have been
203624eb1037Shibler 		 * flushing individual mappings as we go.
203724eb1037Shibler 		 */
203824eb1037Shibler 		if (ptpmap == kernel_pmap)
203924eb1037Shibler 			TBIAS();
204024eb1037Shibler 		else
204124eb1037Shibler 			TBIAU();
204224eb1037Shibler #endif
204324eb1037Shibler 		pv->pv_flags &= ~PV_PTPAGE;
204424eb1037Shibler 		ptpmap->pm_ptpages--;
204524eb1037Shibler 	}
204624eb1037Shibler 	/*
204724eb1037Shibler 	 * Update saved attributes for managed page
204824eb1037Shibler 	 */
204924eb1037Shibler 	pmap_attributes[pa_index(pa)] |= bits;
205024eb1037Shibler 	splx(s);
205124eb1037Shibler }
205224eb1037Shibler 
20531537129bSmckusick /* static */
20541537129bSmckusick boolean_t
pmap_testbit(pa,bit)20551537129bSmckusick pmap_testbit(pa, bit)
20561537129bSmckusick 	register vm_offset_t pa;
20571537129bSmckusick 	int bit;
20581537129bSmckusick {
20591537129bSmckusick 	register pv_entry_t pv;
206014b153a2Shibler 	register int *pte;
20611537129bSmckusick 	int s;
20621537129bSmckusick 
20631537129bSmckusick 	if (pa < vm_first_phys || pa >= vm_last_phys)
20641537129bSmckusick 		return(FALSE);
20651537129bSmckusick 
20661537129bSmckusick 	pv = pa_to_pvh(pa);
20671537129bSmckusick 	s = splimp();
20681537129bSmckusick 	/*
20691537129bSmckusick 	 * Check saved info first
20701537129bSmckusick 	 */
20711537129bSmckusick 	if (pmap_attributes[pa_index(pa)] & bit) {
20721537129bSmckusick 		splx(s);
20731537129bSmckusick 		return(TRUE);
20741537129bSmckusick 	}
207524eb1037Shibler #ifdef HAVEVAC
20761537129bSmckusick 	/*
20771537129bSmckusick 	 * Flush VAC to get correct state of any hardware maintained bits.
20781537129bSmckusick 	 */
20791537129bSmckusick 	if (pmap_aliasmask && (bit & (PG_U|PG_M)))
20801537129bSmckusick 		DCIS();
208124eb1037Shibler #endif
20821537129bSmckusick 	/*
20831537129bSmckusick 	 * Not found, check current mappings returning
20841537129bSmckusick 	 * immediately if found.
20851537129bSmckusick 	 */
2086d45eccefSkarels 	if (pv->pv_pmap != NULL) {
20871537129bSmckusick 		for (; pv; pv = pv->pv_next) {
20881537129bSmckusick 			pte = (int *) pmap_pte(pv->pv_pmap, pv->pv_va);
208914b153a2Shibler 			if (*pte & bit) {
209014b153a2Shibler 				splx(s);
209114b153a2Shibler 				return(TRUE);
209214b153a2Shibler 			}
209314b153a2Shibler 		}
20941537129bSmckusick 	}
20951537129bSmckusick 	splx(s);
20961537129bSmckusick 	return(FALSE);
20971537129bSmckusick }
20981537129bSmckusick 
20991537129bSmckusick /* static */
210024eb1037Shibler void
pmap_changebit(pa,bit,setem)21011537129bSmckusick pmap_changebit(pa, bit, setem)
21021537129bSmckusick 	register vm_offset_t pa;
21031537129bSmckusick 	int bit;
21041537129bSmckusick 	boolean_t setem;
21051537129bSmckusick {
21061537129bSmckusick 	register pv_entry_t pv;
210714b153a2Shibler 	register int *pte, npte;
21081537129bSmckusick 	vm_offset_t va;
21091537129bSmckusick 	int s;
21101537129bSmckusick 	boolean_t firstpage = TRUE;
211124eb1037Shibler #ifdef PMAPSTATS
211224eb1037Shibler 	struct chgstats *chgp;
211324eb1037Shibler #endif
21141537129bSmckusick 
21151537129bSmckusick #ifdef DEBUG
21161537129bSmckusick 	if (pmapdebug & PDB_BITS)
21171537129bSmckusick 		printf("pmap_changebit(%x, %x, %s)\n",
21181537129bSmckusick 		       pa, bit, setem ? "set" : "clear");
21191537129bSmckusick #endif
21201537129bSmckusick 	if (pa < vm_first_phys || pa >= vm_last_phys)
21211537129bSmckusick 		return;
21221537129bSmckusick 
212324eb1037Shibler #ifdef PMAPSTATS
212424eb1037Shibler 	chgp = &changebit_stats[(bit>>2)-1];
212524eb1037Shibler 	if (setem)
212624eb1037Shibler 		chgp->setcalls++;
212724eb1037Shibler 	else
212824eb1037Shibler 		chgp->clrcalls++;
212924eb1037Shibler #endif
21301537129bSmckusick 	pv = pa_to_pvh(pa);
21311537129bSmckusick 	s = splimp();
21321537129bSmckusick 	/*
21331537129bSmckusick 	 * Clear saved attributes (modify, reference)
21341537129bSmckusick 	 */
21351537129bSmckusick 	if (!setem)
21361537129bSmckusick 		pmap_attributes[pa_index(pa)] &= ~bit;
21371537129bSmckusick 	/*
21381537129bSmckusick 	 * Loop over all current mappings setting/clearing as appropos
21391537129bSmckusick 	 * If setting RO do we need to clear the VAC?
21401537129bSmckusick 	 */
2141d45eccefSkarels 	if (pv->pv_pmap != NULL) {
21421537129bSmckusick #ifdef DEBUG
21431537129bSmckusick 		int toflush = 0;
21441537129bSmckusick #endif
21451537129bSmckusick 		for (; pv; pv = pv->pv_next) {
21461537129bSmckusick #ifdef DEBUG
21471537129bSmckusick 			toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1;
21481537129bSmckusick #endif
21491537129bSmckusick 			va = pv->pv_va;
21509c60e975Shibler 
21519c60e975Shibler 			/*
21529c60e975Shibler 			 * XXX don't write protect pager mappings
21539c60e975Shibler 			 */
21549c60e975Shibler 			if (bit == PG_RO) {
21559c60e975Shibler 				extern vm_offset_t pager_sva, pager_eva;
21569c60e975Shibler 
21579c60e975Shibler 				if (va >= pager_sva && va < pager_eva)
21589c60e975Shibler 					continue;
21599c60e975Shibler 			}
21609c60e975Shibler 
21611537129bSmckusick 			pte = (int *) pmap_pte(pv->pv_pmap, va);
216224eb1037Shibler #ifdef HAVEVAC
21631537129bSmckusick 			/*
21641537129bSmckusick 			 * Flush VAC to ensure we get correct state of HW bits
21651537129bSmckusick 			 * so we don't clobber them.
21661537129bSmckusick 			 */
21671537129bSmckusick 			if (firstpage && pmap_aliasmask) {
21681537129bSmckusick 				firstpage = FALSE;
21691537129bSmckusick 				DCIS();
21701537129bSmckusick 			}
217124eb1037Shibler #endif
21721537129bSmckusick 			if (setem)
21731537129bSmckusick 				npte = *pte | bit;
21741537129bSmckusick 			else
21751537129bSmckusick 				npte = *pte & ~bit;
21761537129bSmckusick 			if (*pte != npte) {
217724eb1037Shibler #if defined(HP380)
217824eb1037Shibler 				/*
217924eb1037Shibler 				 * If we are changing caching status or
218024eb1037Shibler 				 * protection make sure the caches are
218124eb1037Shibler 				 * flushed (but only once).
218224eb1037Shibler 				 */
218324eb1037Shibler 				if (firstpage && mmutype == MMU_68040 &&
218424eb1037Shibler 				    (bit == PG_RO && setem ||
218524eb1037Shibler 				     (bit & PG_CMASK))) {
218624eb1037Shibler 					firstpage = FALSE;
218724eb1037Shibler 					DCFP(pa);
218824eb1037Shibler 					ICPP(pa);
218924eb1037Shibler 				}
219024eb1037Shibler #endif
21911537129bSmckusick 				*pte = npte;
219224eb1037Shibler 				if (active_pmap(pv->pv_pmap))
21931537129bSmckusick 					TBIS(va);
219424eb1037Shibler #ifdef PMAPSTATS
219514b153a2Shibler 				if (setem)
219624eb1037Shibler 					chgp->sethits++;
219714b153a2Shibler 				else
219824eb1037Shibler 					chgp->clrhits++;
219924eb1037Shibler #endif
220024eb1037Shibler 			}
220124eb1037Shibler #ifdef PMAPSTATS
220224eb1037Shibler 			else {
220324eb1037Shibler 				if (setem)
220424eb1037Shibler 					chgp->setmiss++;
220524eb1037Shibler 				else
220624eb1037Shibler 					chgp->clrmiss++;
220714b153a2Shibler 			}
220814b153a2Shibler #endif
220914b153a2Shibler 		}
221024eb1037Shibler #if defined(HAVEVAC) && defined(DEBUG)
22111537129bSmckusick 		if (setem && bit == PG_RO && (pmapvacflush & PVF_PROTECT)) {
22121537129bSmckusick 			if ((pmapvacflush & PVF_TOTAL) || toflush == 3)
22131537129bSmckusick 				DCIA();
22141537129bSmckusick 			else if (toflush == 2)
22151537129bSmckusick 				DCIS();
22161537129bSmckusick 			else
22171537129bSmckusick 				DCIU();
22181537129bSmckusick 		}
22191537129bSmckusick #endif
22201537129bSmckusick 	}
22211537129bSmckusick 	splx(s);
22221537129bSmckusick }
22231537129bSmckusick 
22241537129bSmckusick /* static */
22251537129bSmckusick void
pmap_enter_ptpage(pmap,va)22261537129bSmckusick pmap_enter_ptpage(pmap, va)
22271537129bSmckusick 	register pmap_t pmap;
22281537129bSmckusick 	register vm_offset_t va;
22291537129bSmckusick {
22301537129bSmckusick 	register vm_offset_t ptpa;
22311537129bSmckusick 	register pv_entry_t pv;
22321537129bSmckusick 	st_entry_t *ste;
22331537129bSmckusick 	int s;
22341537129bSmckusick 
22351537129bSmckusick #ifdef DEBUG
22361537129bSmckusick 	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE))
22371537129bSmckusick 		printf("pmap_enter_ptpage: pmap %x, va %x\n", pmap, va);
223824eb1037Shibler #endif
223924eb1037Shibler #ifdef PMAPSTATS
22401537129bSmckusick 	enter_stats.ptpneeded++;
22411537129bSmckusick #endif
22421537129bSmckusick 	/*
22431537129bSmckusick 	 * Allocate a segment table if necessary.  Note that it is allocated
224422ac56c6Shibler 	 * from a private map and not pt_map.  This keeps user page tables
22451537129bSmckusick 	 * aligned on segment boundaries in the kernel address space.
22461537129bSmckusick 	 * The segment table is wired down.  It will be freed whenever the
22471537129bSmckusick 	 * reference count drops to zero.
22481537129bSmckusick 	 */
22491537129bSmckusick 	if (pmap->pm_stab == Segtabzero) {
22501537129bSmckusick 		pmap->pm_stab = (st_entry_t *)
225122ac56c6Shibler 			kmem_alloc(st_map, HP_STSIZE);
225214b153a2Shibler 		pmap->pm_stpa = (st_entry_t *)
225314b153a2Shibler 			pmap_extract(kernel_pmap, (vm_offset_t)pmap->pm_stab);
225414b153a2Shibler #if defined(HP380)
225514b153a2Shibler 		if (mmutype == MMU_68040) {
225614b153a2Shibler #ifdef DEBUG
225714b153a2Shibler 			if (dowriteback && dokwriteback)
225814b153a2Shibler #endif
225911a2ec72Shibler 			pmap_changebit((vm_offset_t)pmap->pm_stpa, PG_CCB, 0);
226014b153a2Shibler 			pmap->pm_stfree = protostfree;
226114b153a2Shibler 		}
226214b153a2Shibler #endif
22631537129bSmckusick 		pmap->pm_stchanged = TRUE;
22641537129bSmckusick 		/*
22651537129bSmckusick 		 * XXX may have changed segment table pointer for current
22661537129bSmckusick 		 * process so update now to reload hardware.
22671537129bSmckusick 		 */
2268d45eccefSkarels 		if (pmap == curproc->p_vmspace->vm_map.pmap)
2269d45eccefSkarels 			PMAP_ACTIVATE(pmap, (struct pcb *)curproc->p_addr, 1);
22701537129bSmckusick #ifdef DEBUG
22711537129bSmckusick 		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
227214b153a2Shibler 			printf("enter: pmap %x stab %x(%x)\n",
227314b153a2Shibler 			       pmap, pmap->pm_stab, pmap->pm_stpa);
22741537129bSmckusick #endif
22751537129bSmckusick 	}
22761537129bSmckusick 
22771537129bSmckusick 	ste = pmap_ste(pmap, va);
227814b153a2Shibler #if defined(HP380)
227914b153a2Shibler 	/*
228014b153a2Shibler 	 * Allocate level 2 descriptor block if necessary
228114b153a2Shibler 	 */
228214b153a2Shibler 	if (mmutype == MMU_68040) {
228314b153a2Shibler 		if (!ste->sg_v) {
228414b153a2Shibler 			int ix;
228514b153a2Shibler 			caddr_t addr;
228614b153a2Shibler 
228714b153a2Shibler 			ix = bmtol2(pmap->pm_stfree);
228814b153a2Shibler 			if (ix == -1)
228914b153a2Shibler 				panic("enter: out of address space"); /* XXX */
229014b153a2Shibler 			pmap->pm_stfree &= ~l2tobm(ix);
229114b153a2Shibler 			addr = (caddr_t)&pmap->pm_stab[ix*SG4_LEV2SIZE];
229214b153a2Shibler 			bzero(addr, SG4_LEV2SIZE*sizeof(st_entry_t));
229314b153a2Shibler 			addr = (caddr_t)&pmap->pm_stpa[ix*SG4_LEV2SIZE];
229414b153a2Shibler 			*(int *)ste = (u_int)addr | SG_RW | SG_U | SG_V;
229514b153a2Shibler #ifdef DEBUG
229614b153a2Shibler 			if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
229714b153a2Shibler 				printf("enter: alloc ste2 %d(%x)\n", ix, addr);
229814b153a2Shibler #endif
229914b153a2Shibler 		}
230014b153a2Shibler 		ste = pmap_ste2(pmap, va);
230114b153a2Shibler 		/*
230214b153a2Shibler 		 * Since a level 2 descriptor maps a block of SG4_LEV3SIZE
230314b153a2Shibler 		 * level 3 descriptors, we need a chunk of NPTEPG/SG4_LEV3SIZE
230414b153a2Shibler 		 * (16) such descriptors (NBPG/SG4_LEV3SIZE bytes) to map a
230514b153a2Shibler 		 * PT page--the unit of allocation.  We set `ste' to point
230614b153a2Shibler 		 * to the first entry of that chunk which is validated in its
230714b153a2Shibler 		 * entirety below.
230814b153a2Shibler 		 */
230914b153a2Shibler 		ste = (st_entry_t *)((int)ste & ~(NBPG/SG4_LEV3SIZE-1));
231014b153a2Shibler #ifdef DEBUG
231114b153a2Shibler 		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
231214b153a2Shibler 			printf("enter: ste2 %x (%x)\n",
231314b153a2Shibler 			       pmap_ste2(pmap, va), ste);
231414b153a2Shibler #endif
231514b153a2Shibler 	}
231614b153a2Shibler #endif
23171537129bSmckusick 	va = trunc_page((vm_offset_t)pmap_pte(pmap, va));
23181537129bSmckusick 
23191537129bSmckusick 	/*
23201537129bSmckusick 	 * In the kernel we allocate a page from the kernel PT page
23211537129bSmckusick 	 * free list and map it into the kernel page table map (via
23221537129bSmckusick 	 * pmap_enter).
23231537129bSmckusick 	 */
23241537129bSmckusick 	if (pmap == kernel_pmap) {
23251537129bSmckusick 		register struct kpt_page *kpt;
23261537129bSmckusick 
23271537129bSmckusick 		s = splimp();
23281537129bSmckusick 		if ((kpt = kpt_free_list) == (struct kpt_page *)0) {
23291537129bSmckusick 			/*
23301537129bSmckusick 			 * No PT pages available.
23311537129bSmckusick 			 * Try once to free up unused ones.
23321537129bSmckusick 			 */
23331537129bSmckusick #ifdef DEBUG
23341537129bSmckusick 			if (pmapdebug & PDB_COLLECT)
23351537129bSmckusick 				printf("enter: no KPT pages, collecting...\n");
23361537129bSmckusick #endif
23371537129bSmckusick 			pmap_collect(kernel_pmap);
23381537129bSmckusick 			if ((kpt = kpt_free_list) == (struct kpt_page *)0)
23391537129bSmckusick 				panic("pmap_enter_ptpage: can't get KPT page");
23401537129bSmckusick 		}
234124eb1037Shibler #ifdef PMAPSTATS
23421537129bSmckusick 		if (++kpt_stats.kptinuse > kpt_stats.kptmaxuse)
23431537129bSmckusick 			kpt_stats.kptmaxuse = kpt_stats.kptinuse;
23441537129bSmckusick #endif
23451537129bSmckusick 		kpt_free_list = kpt->kpt_next;
23461537129bSmckusick 		kpt->kpt_next = kpt_used_list;
23471537129bSmckusick 		kpt_used_list = kpt;
23481537129bSmckusick 		ptpa = kpt->kpt_pa;
23492a60f21fSmckusick 		bzero((caddr_t)kpt->kpt_va, HP_PAGE_SIZE);
23501537129bSmckusick 		pmap_enter(pmap, va, ptpa, VM_PROT_DEFAULT, TRUE);
23511537129bSmckusick #ifdef DEBUG
235214b153a2Shibler 		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE)) {
235314b153a2Shibler 			int ix = pmap_ste(pmap, va) - pmap_ste(pmap, 0);
235414b153a2Shibler 
23551537129bSmckusick 			printf("enter: add &Sysptmap[%d]: %x (KPT page %x)\n",
235614b153a2Shibler 			       ix, *(int *)&Sysptmap[ix], kpt->kpt_va);
235714b153a2Shibler 		}
23581537129bSmckusick #endif
23591537129bSmckusick 		splx(s);
23601537129bSmckusick 	}
23611537129bSmckusick 	/*
23621537129bSmckusick 	 * For user processes we just simulate a fault on that location
23631537129bSmckusick 	 * letting the VM system allocate a zero-filled page.
23641537129bSmckusick 	 */
23651537129bSmckusick 	else {
23661537129bSmckusick #ifdef DEBUG
23671537129bSmckusick 		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
23681537129bSmckusick 			printf("enter: about to fault UPT pg at %x\n", va);
236981dd55b1Shibler #endif
237014b153a2Shibler 		s = vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);
237114b153a2Shibler 		if (s != KERN_SUCCESS) {
237214b153a2Shibler 			printf("vm_fault(pt_map, %x, RW, 0) -> %d\n", va, s);
237314b153a2Shibler 			panic("pmap_enter: vm_fault failed");
237414b153a2Shibler 		}
23751537129bSmckusick 		ptpa = pmap_extract(kernel_pmap, va);
237681dd55b1Shibler 		/*
237781dd55b1Shibler 		 * Mark the page clean now to avoid its pageout (and
237881dd55b1Shibler 		 * hence creation of a pager) between now and when it
237981dd55b1Shibler 		 * is wired; i.e. while it is on a paging queue.
238081dd55b1Shibler 		 */
238181dd55b1Shibler 		PHYS_TO_VM_PAGE(ptpa)->flags |= PG_CLEAN;
23821537129bSmckusick #ifdef DEBUG
2383d41d7573Smckusick 		PHYS_TO_VM_PAGE(ptpa)->flags |= PG_PTPAGE;
23841537129bSmckusick #endif
23851537129bSmckusick 	}
238614b153a2Shibler #if defined(HP380)
238714b153a2Shibler 	/*
238814b153a2Shibler 	 * Turn off copyback caching of page table pages,
238914b153a2Shibler 	 * could get ugly otherwise.
239014b153a2Shibler 	 */
239114b153a2Shibler #ifdef DEBUG
239214b153a2Shibler 	if (dowriteback && dokwriteback)
239314b153a2Shibler #endif
239414b153a2Shibler 	if (mmutype == MMU_68040) {
239514b153a2Shibler 		int *pte = (int *)pmap_pte(kernel_pmap, va);
239614b153a2Shibler #ifdef DEBUG
239714b153a2Shibler 		if ((pmapdebug & PDB_PARANOIA) && (*pte & PG_CCB) == 0)
239814b153a2Shibler 			printf("%s PT no CCB: kva=%x ptpa=%x pte@%x=%x\n",
239914b153a2Shibler 			       pmap == kernel_pmap ? "Kernel" : "User",
240014b153a2Shibler 			       va, ptpa, pte, *pte);
240114b153a2Shibler #endif
240214b153a2Shibler 		pmap_changebit(ptpa, PG_CCB, 0);
240314b153a2Shibler 	}
240414b153a2Shibler #endif
24051537129bSmckusick 	/*
24061537129bSmckusick 	 * Locate the PV entry in the kernel for this PT page and
24071537129bSmckusick 	 * record the STE address.  This is so that we can invalidate
24081537129bSmckusick 	 * the STE when we remove the mapping for the page.
24091537129bSmckusick 	 */
24101537129bSmckusick 	pv = pa_to_pvh(ptpa);
24111537129bSmckusick 	s = splimp();
24121537129bSmckusick 	if (pv) {
24131537129bSmckusick 		pv->pv_flags |= PV_PTPAGE;
24141537129bSmckusick 		do {
24151537129bSmckusick 			if (pv->pv_pmap == kernel_pmap && pv->pv_va == va)
24161537129bSmckusick 				break;
24171537129bSmckusick 		} while (pv = pv->pv_next);
24181537129bSmckusick 	}
24191537129bSmckusick #ifdef DEBUG
2420d45eccefSkarels 	if (pv == NULL)
24211537129bSmckusick 		panic("pmap_enter_ptpage: PT page not entered");
24221537129bSmckusick #endif
24231537129bSmckusick 	pv->pv_ptste = ste;
24241537129bSmckusick 	pv->pv_ptpmap = pmap;
24251537129bSmckusick #ifdef DEBUG
24261537129bSmckusick 	if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
24271537129bSmckusick 		printf("enter: new PT page at PA %x, ste at %x\n", ptpa, ste);
24281537129bSmckusick #endif
24291537129bSmckusick 
24301537129bSmckusick 	/*
24311537129bSmckusick 	 * Map the new PT page into the segment table.
24321537129bSmckusick 	 * Also increment the reference count on the segment table if this
24331537129bSmckusick 	 * was a user page table page.  Note that we don't use vm_map_pageable
24341537129bSmckusick 	 * to keep the count like we do for PT pages, this is mostly because
24351537129bSmckusick 	 * it would be difficult to identify ST pages in pmap_pageable to
24361537129bSmckusick 	 * release them.  We also avoid the overhead of vm_map_pageable.
24371537129bSmckusick 	 */
243814b153a2Shibler #if defined(HP380)
243914b153a2Shibler 	if (mmutype == MMU_68040) {
244014b153a2Shibler 		st_entry_t *este;
244114b153a2Shibler 
244214b153a2Shibler 		for (este = &ste[NPTEPG/SG4_LEV3SIZE]; ste < este; ste++) {
244314b153a2Shibler 			*(int *)ste = ptpa | SG_U | SG_RW | SG_V;
244414b153a2Shibler 			ptpa += SG4_LEV3SIZE * sizeof(st_entry_t);
244514b153a2Shibler 		}
244614b153a2Shibler 	} else
244714b153a2Shibler #endif
24481537129bSmckusick 	*(int *)ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
24491537129bSmckusick 	if (pmap != kernel_pmap) {
24501537129bSmckusick 		pmap->pm_sref++;
24511537129bSmckusick #ifdef DEBUG
24521537129bSmckusick 		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
24531537129bSmckusick 			printf("enter: stab %x refcnt %d\n",
24541537129bSmckusick 			       pmap->pm_stab, pmap->pm_sref);
24551537129bSmckusick #endif
24561537129bSmckusick 	}
245724eb1037Shibler #if 0
24581537129bSmckusick 	/*
24591537129bSmckusick 	 * Flush stale TLB info.
24601537129bSmckusick 	 */
24611537129bSmckusick 	if (pmap == kernel_pmap)
24621537129bSmckusick 		TBIAS();
24631537129bSmckusick 	else
24641537129bSmckusick 		TBIAU();
246524eb1037Shibler #endif
24661537129bSmckusick 	pmap->pm_ptpages++;
24671537129bSmckusick 	splx(s);
24681537129bSmckusick }
24691537129bSmckusick 
24701537129bSmckusick #ifdef DEBUG
247124eb1037Shibler /* static */
247224eb1037Shibler void
pmap_pvdump(pa)24731537129bSmckusick pmap_pvdump(pa)
24741537129bSmckusick 	vm_offset_t pa;
24751537129bSmckusick {
24761537129bSmckusick 	register pv_entry_t pv;
24771537129bSmckusick 
24781537129bSmckusick 	printf("pa %x", pa);
24791537129bSmckusick 	for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next)
24801537129bSmckusick 		printf(" -> pmap %x, va %x, ptste %x, ptpmap %x, flags %x",
24811537129bSmckusick 		       pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap,
24821537129bSmckusick 		       pv->pv_flags);
24831537129bSmckusick 	printf("\n");
24841537129bSmckusick }
24851537129bSmckusick 
248624eb1037Shibler /* static */
248724eb1037Shibler void
pmap_check_wiring(str,va)24881537129bSmckusick pmap_check_wiring(str, va)
24891537129bSmckusick 	char *str;
24901537129bSmckusick 	vm_offset_t va;
24911537129bSmckusick {
24921537129bSmckusick 	vm_map_entry_t entry;
24931537129bSmckusick 	register int count, *pte;
24941537129bSmckusick 
24951537129bSmckusick 	va = trunc_page(va);
249614b153a2Shibler 	if (!pmap_ste_v(kernel_pmap, va) ||
24971537129bSmckusick 	    !pmap_pte_v(pmap_pte(kernel_pmap, va)))
24981537129bSmckusick 		return;
24991537129bSmckusick 
25001537129bSmckusick 	if (!vm_map_lookup_entry(pt_map, va, &entry)) {
25011537129bSmckusick 		printf("wired_check: entry for %x not found\n", va);
25021537129bSmckusick 		return;
25031537129bSmckusick 	}
25041537129bSmckusick 	count = 0;
25051537129bSmckusick 	for (pte = (int *)va; pte < (int *)(va+PAGE_SIZE); pte++)
25061537129bSmckusick 		if (*pte)
25071537129bSmckusick 			count++;
25081537129bSmckusick 	if (entry->wired_count != count)
25091537129bSmckusick 		printf("*%s*: %x: w%d/a%d\n",
25101537129bSmckusick 		       str, va, entry->wired_count, count);
25111537129bSmckusick }
25121537129bSmckusick #endif
2513