xref: /original-bsd/sys/hp300/include/pmap.h (revision 13deef58)
/*
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)pmap.h	7.10 (Berkeley) 06/22/92
 */

#ifndef	_PMAP_MACHINE_
#define	_PMAP_MACHINE_

#define HP_PAGE_SIZE	NBPG
#if defined(HP380)
#define HP_SEG_SIZE	(mmutype == MMU_68040 ? 0x40000 : NBSEG)
#else
#define HP_SEG_SIZE	NBSEG
#endif

#define hp300_trunc_seg(x)	(((unsigned)(x)) & ~(HP_SEG_SIZE-1))
#define hp300_round_seg(x)	hp300_trunc_seg((unsigned)(x) + HP_SEG_SIZE-1)
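
/*
 * Illustrative example (not part of the original header): on a 68040,
 * where HP_SEG_SIZE is 0x40000 (256KB), an address such as 0x41234
 * truncates and rounds to segment boundaries as follows:
 *
 *	hp300_trunc_seg(0x41234) == 0x40000
 *	hp300_round_seg(0x41234) == 0x80000
 */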

/*
 * Pmap stuff
 */
struct pmap {
	struct pte		*pm_ptab;	/* KVA of page table */
	struct ste		*pm_stab;	/* KVA of segment table */
	int			pm_stchanged;	/* ST changed */
	int			pm_stfree;	/* 040: free lev2 blocks */
	struct ste		*pm_stpa;	/* 040: ST phys addr */
	short			pm_sref;	/* segment table ref count */
	short			pm_count;	/* pmap reference count */
	simple_lock_data_t	pm_lock;	/* lock on pmap */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	long			pm_ptpages;	/* more stats: PT pages */
};

typedef struct pmap	*pmap_t;

extern struct pmap	kernel_pmap_store;
#define kernel_pmap (&kernel_pmap_store)

/*
 * On the 040 we keep track of which level 2 blocks are already in use
 * with the pm_stfree mask.  Bits are arranged from LSB (block 0) to MSB
 * (block 31).  For convenience, the level 1 table is considered to be
 * block 0.
 *
 * MAX[KU]L2SIZE control how many pages of level 2 descriptors are allowed
 * for the kernel and users.  8 implies only the initial "segment table"
 * page is used.  WARNING: don't change MAXUL2SIZE unless you can allocate
 * physically contiguous pages for the ST in pmap.c!
 */
#define	MAXKL2SIZE	32
#define MAXUL2SIZE	8
#define l2tobm(n)	(1 << (n))
#define	bmtol2(n)	(ffs(n) - 1)
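
/*
 * The following is an illustrative sketch, not part of the original
 * header.  It assumes the convention implied by bmtol2()/ffs(): a set
 * bit in pm_stfree marks a free level 2 block.  st_blkalloc() is a
 * hypothetical helper showing how the lowest free block could be
 * claimed from the mask.
 */
#ifdef notdef
static int
st_blkalloc(pmap)
	struct pmap *pmap;
{
	int blk;

	if (pmap->pm_stfree == 0)
		return (-1);			/* no level 2 blocks free */
	blk = bmtol2(pmap->pm_stfree);		/* lowest set bit == lowest free block */
	pmap->pm_stfree &= ~l2tobm(blk);	/* mark the block in use */
	return (blk);
}
#endif /* notdef */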

/*
 * Macros for speed
 */
#define PMAP_ACTIVATE(pmapp, pcbp, iscurproc) \
	if ((pmapp) != NULL && (pmapp)->pm_stchanged) { \
		(pcbp)->pcb_ustp = hp300_btop((vm_offset_t)(pmapp)->pm_stpa); \
		if (iscurproc) \
			loadustp((pcbp)->pcb_ustp); \
		(pmapp)->pm_stchanged = FALSE; \
	}
#define PMAP_DEACTIVATE(pmapp, pcbp)
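
/*
 * Illustrative sketch, not part of the original header.  A context
 * switch path would typically use the macro roughly as follows;
 * example_switch_pmap() is a hypothetical helper, not a real kernel
 * routine.  The user segment table pointer is reloaded only when the
 * segment table has changed, and only for the current process.
 */
#ifdef notdef
static void
example_switch_pmap(pmap, pcbp, iscurproc)
	pmap_t pmap;
	struct pcb *pcbp;
	int iscurproc;
{
	/* Stuff the new ST physical address into the pcb and the MMU. */
	PMAP_ACTIVATE(pmap, pcbp, iscurproc);
}
#endif /* notdef */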

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */
typedef struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	struct ste	*pv_ptste;	/* non-zero if VA maps a PT page */
	struct pmap	*pv_ptpmap;	/* if pv_ptste, pmap for PT page */
	int		pv_flags;	/* flags */
} *pv_entry_t;

#define	PV_CI		0x01	/* all entries must be cache inhibited */
#define PV_PTPAGE	0x02	/* entry maps a page table page */

#ifdef	KERNEL
pv_entry_t	pv_table;		/* array of entries, one per page */

#define pa_index(pa)		atop((pa) - vm_first_phys)
#define pa_to_pvh(pa)		(&pv_table[pa_index(pa)])
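
/*
 * Illustrative sketch, not part of the original header: walking all
 * current mappings of a managed physical page.  example_pv_scan() is a
 * hypothetical helper, and pa is assumed to lie in the managed range
 * starting at vm_first_phys.
 */
#ifdef notdef
static void
example_pv_scan(pa)
	vm_offset_t pa;
{
	register pv_entry_t pv;

	for (pv = pa_to_pvh(pa); pv != NULL; pv = pv->pv_next) {
		if (pv->pv_pmap == NULL)	/* an unused head entry has a NULL pmap */
			continue;
		/* pv->pv_pmap and pv->pv_va identify one mapping of pa */
	}
}
#endif /* notdef */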

#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)

extern	struct pte *Sysmap;
extern	char *vmmap;			/* map for mem, dumps, etc. */
#endif	/* KERNEL */

#endif	/* _PMAP_MACHINE_ */