/*
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * The CMU software License Agreement specifies the terms and conditions
 * for use and redistribution.
 *
 * from hp300:	@(#)pmap.h	7.2 (Berkeley) 12/16/90
 *
 *	@(#)pmap.h	1.1 (Berkeley) 01/18/91
 */

#ifndef	_PMAP_MACHINE_
#define	_PMAP_MACHINE_	1

#include "sys/lock.h"
#include "machine/vmparam.h"
#include "vm/vm_statistics.h"

/*
 * 386 page table entry and page table directory
 * W.Jolitz, 8/89
 */

struct pde
{
unsigned int
		pd_v:1,			/* valid bit */
		pd_prot:2,		/* access control */
		pd_mbz1:2,		/* reserved, must be zero */
		pd_u:1,			/* hardware maintained 'used' bit */
		:1,			/* not used */
		pd_mbz2:2,		/* reserved, must be zero */
		:3,			/* reserved for software */
		pd_pfnum:20;		/* physical page frame number of pte's*/
};

#define	PD_MASK		0xffc00000	/* page directory address bits */
#define	PD_SHIFT	22		/* page directory address bits */
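
/*
 * Illustrative sketch, not part of the original interface: the 386 MMU
 * decodes a 32-bit linear address as a 10-bit directory index, a 10-bit
 * page table index, and a 12-bit byte offset.  With the masks above
 * (and PGSHIFT/PGOFSET from machine/vmparam.h, assumed to be 12 and
 * 0xfff for the 4096-byte i386 page), the pieces could be recovered as
 *
 *	pde_index = ((unsigned)va & PD_MASK) >> PD_SHIFT;
 *	pte_index = ((unsigned)va & ~PD_MASK) >> PGSHIFT;
 *	offset    = (unsigned)va & PGOFSET;
 */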

struct pte
{
unsigned int
		pg_v:1,			/* valid bit */
		pg_prot:2,		/* access control */
		pg_mbz1:2,		/* reserved, must be zero */
		pg_u:1,			/* hardware maintained 'used' bit */
		pg_m:1,			/* hardware maintained modified bit */
		pg_mbz2:2,		/* reserved, must be zero */
		pg_w:1,			/* software, wired down page */
		:1,			/* software (unused) */
		pg_nc:1,		/* 'uncacheable page' bit */
		pg_pfnum:20;		/* physical page frame number */
};

#define	PG_V		0x00000001
#define	PG_PROT		0x00000006 /* all protection bits */
#define	PG_W		0x00000200
#define PG_N		0x00000800 /* Non-cacheable */
#define	PG_M		0x00000040
#define PG_U		0x00000020
#define	PG_FRAME	0xfffff000

#define	PG_NOACC	0
#define	PG_KR		0x00000000
#define	PG_KW		0x00000002
#define	PG_URKR		0x00000004
#define	PG_URKW		0x00000004
#define	PG_UW		0x00000006
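
/*
 * Illustrative sketch, not part of the original interface: a hardware
 * pte for a valid, kernel-writable, wired mapping of physical address
 * pa could be composed from the bits above as
 *
 *	pte = (pa & PG_FRAME) | PG_V | PG_KW | PG_W;
 *
 * and the frame bits recovered again with (pte & PG_FRAME).  The PG_*
 * values mirror the bit fields in struct pte (PG_U is the hardware
 * 'used' bit, PG_M the 'modified' bit).
 */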

/*
 * Page Protection Exception bits
 */

#define PGEX_P		0x01	/* Protection violation vs. not present */
#define PGEX_W		0x02	/* during a Write cycle */
#define PGEX_U		0x04	/* access from User mode (UPL) */
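
/*
 * Illustrative sketch, not part of the original interface: these bits
 * come from the error code the 386 pushes on a page fault (trap 14).
 * A fault handler might classify the fault as
 *
 *	if ((code & PGEX_P) == 0)	-- page was not present
 *	else				-- protection violation
 *	if (code & PGEX_W)		-- fault occurred on a write
 *	if (code & PGEX_U)		-- fault came from user mode
 */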

typedef struct pde	pd_entry_t;	/* page directory entry */
typedef struct pte	pt_entry_t;	/* Mach page table entry */

#define	PD_ENTRY_NULL	((pd_entry_t *) 0)
#define	PT_ENTRY_NULL	((pt_entry_t *) 0)

/*
 * One page directory, shared between
 * kernel and user modes.
 */
#define I386_PAGE_SIZE	NBPG
#define I386_PDR_SIZE	NBPDR

#define I386_KPDES	8 /* KPT page directory size */
#define I386_UPDES	NBPDR/sizeof(struct pde)-8 /* UPT page directory size */

#define I386_MAX_PTSIZE	I386_UPDES*NBPG	/* max size of UPT */
#define I386_MAX_KPTSIZE I386_KPDES*NBPG /* max memory to allocate to KPT */
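
/*
 * Illustrative sketch, not part of the original interface: each page
 * directory slot names one page-table page of NBPG bytes, so the limits
 * above are (number of directory slots) * NBPG.  Assuming the usual
 * 4096-byte i386 page (NBPG == 4096), the kernel's 8 slots give
 *
 *	I386_MAX_KPTSIZE = 8 * 4096 = 32768 bytes of KPT,
 *
 * i.e. enough page table entries to map 8 * 4MB = 32MB of kernel
 * virtual address space.
 */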

/*
 * Kernel virtual address to page table entry and to physical address.
 */
#define	kvtopte(va) \
	(&Sysmap[((unsigned)(va) - VM_MIN_KERNEL_ADDRESS) >> PGSHIFT])
#define	ptetokv(pt) \
	((((pt_entry_t *)(pt) - Sysmap) << PGSHIFT) + VM_MIN_KERNEL_ADDRESS)
#define	kvtophys(va) \
	((kvtopte(va)->pg_pfnum << PGSHIFT) | ((int)(va) & PGOFSET))
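
/*
 * Illustrative sketch, not part of the original interface: Sysmap is the
 * kernel's linear array of ptes, indexed by page number relative to
 * VM_MIN_KERNEL_ADDRESS.  A caller holding a mapped kernel address might
 * translate it by hand as
 *
 *	pt_entry_t *pte = kvtopte(va);
 *	if (pte->pg_v)
 *		pa = kvtophys(va);
 *
 * kvtophys() simply pastes the pte's frame number back together with the
 * low-order byte offset of va.
 */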

/*
 * Pmap stuff
 */
#define PMAP_NULL	((pmap_t) 0)

struct pmap {
	pt_entry_t		*pm_ptab;	/* KVA of page table */
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	boolean_t		pm_pdchanged;	/* pdir changed */
	short			pm_dref;	/* page directory ref count */
	short			pm_count;	/* pmap reference count */
	simple_lock_data_t	pm_lock;	/* lock on pmap */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	long			pm_ptpages;	/* more stats: PT pages */
};

typedef struct pmap	*pmap_t;

extern pmap_t		kernel_pmap;

/*
 * Macros for speed
 */
#define PMAP_ACTIVATE(pmapp, pcbp) \
	if ((pmapp) != PMAP_NULL && (pmapp)->pm_pdchanged) { \
		(pcbp)->pcb_cr3 = \
		    i386_btop(pmap_extract(kernel_pmap, (pmapp)->pm_pdir)); \
		if ((pmapp) == u.u_procp->p_map->pmap) \
			load_cr3((pcbp)->pcb_cr3); \
		(pmapp)->pm_pdchanged = FALSE; \
	}
#define PMAP_DEACTIVATE(pmapp, pcbp)
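
/*
 * Illustrative sketch, not part of the original interface: PMAP_ACTIVATE
 * expands to a bare if-statement, so callers use it as a statement of
 * its own.  On a context switch the machine-dependent code might invoke
 *
 *	PMAP_ACTIVATE(p->p_map->pmap, &u.u_pcb);
 *
 * which refreshes pcb_cr3 from the pmap's page directory and reloads
 * %cr3 only when the directory actually changed and belongs to the
 * running process.  PMAP_DEACTIVATE is a no-op on the 386.
 */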

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */
typedef struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	pd_entry_t	*pv_ptpde;	/* non-zero if VA maps a PT page */
	pmap_t		pv_ptpmap;	/* if pv_ptpde, pmap for PT page */
	int		pv_flags;	/* flags */
} *pv_entry_t;

#define	PV_ENTRY_NULL	((pv_entry_t) 0)

#define	PV_CI		0x01	/* all entries must be cache inhibited */
#define PV_PTPAGE	0x02	/* entry maps a page table page */

#ifdef	KERNEL

pv_entry_t	pv_table;		/* array of entries, one per page */

#define pa_index(pa)		atop(pa - vm_first_phys)
#define pa_to_pvh(pa)		(&pv_table[pa_index(pa)])
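
/*
 * Illustrative sketch, not part of the original interface: to visit the
 * virtual mappings of a managed physical page, machine-dependent code
 * typically walks the chain headed in pv_table, e.g.
 *
 *	pv_entry_t pv;
 *
 *	for (pv = pa_to_pvh(pa); pv != PV_ENTRY_NULL; pv = pv->pv_next)
 *		if (pv->pv_pmap != PMAP_NULL)
 *			-- operate on (pv->pv_pmap, pv->pv_va)
 *
 * pa must lie within the managed range starting at vm_first_phys for
 * pa_index() to yield a valid pv_table slot.
 */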

#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)

extern	pt_entry_t	*Sysmap;
#endif /* KERNEL */

#endif /* _PMAP_MACHINE_ */