/*	$OpenBSD: pmap.h,v 1.96 2024/11/08 13:18:29 jsg Exp $	*/
/*	$NetBSD: pmap.h,v 1.44 2000/04/24 17:18:18 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_

#ifdef _KERNEL
#include <machine/cpufunc.h>
#include <machine/segments.h>
#endif
#include <sys/mutex.h>
#include <uvm/uvm_object.h>
#include <machine/pte.h>

#define PDSLOT_PTE ((KERNBASE/NBPD)-2) /* 830: for recursive PDP map */
#define PDSLOT_KERN (KERNBASE/NBPD) /* 832: start of kernel space */
#define PDSLOT_APTE ((unsigned)1022) /* 1022: alternative recursive slot */

/*
 * The following define determines how many PTPs should be set up for the
 * kernel by locore.s at boot time.  This should be large enough to
 * get the VM system running.  Once the VM system is running, the
 * pmap module can add more PTPs to the kernel area on demand.
 */

#ifndef NKPTP
#define NKPTP 8 /* 16/32MB to start */
#endif
#define NKPTP_MIN 4 /* smallest value we allow */
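
/*
 * Worked example of the sizing above (illustrative): a PTP maps one
 * page directory entry's worth of KVA, i.e. NBPD bytes.  With NKPTP
 * set to 8 that is 8 * 4MB = 32MB of bootstrap KVA in non-PAE mode,
 * or 8 * 2MB = 16MB under PAE -- hence the "16/32MB to start" note.
 */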

/*
 * PG_AVAIL usage: we make use of the ignored bits of the PTE
 */

#define PG_W PG_AVAIL1 /* "wired" mapping */
#define PG_PVLIST PG_AVAIL2 /* mapping has entry on pvlist */
#define PG_X PG_AVAIL3 /* executable mapping */
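
/*
 * Example (a sketch, not code from pmap.c): a valid, writable and
 * wired mapping of physical address "pa" would OR the software bits
 * above into an otherwise ordinary PTE:
 *
 *	u_int32_t pte = (pa & PG_FRAME) | PG_V | PG_RW | PG_W;
 *
 * The MMU ignores PG_W/PG_PVLIST/PG_X; only the pmap module reads them.
 */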

#define PTP0_PA (PAGE_SIZE * 3)

#ifdef _KERNEL
/*
 * pmap data structures: see pmap.c for details of locking.
 */

struct pmap;
typedef struct pmap *pmap_t;

/*
 * We maintain a list of all non-kernel pmaps.
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * The pmap structure
 *
 * Note that the pm_obj contains the reference count,
 * page list, and number of PTPs within the pmap.
 */

struct pmap {
	uint64_t pm_pdidx[4]; /* PDIEs for PAE mode */
	uint64_t pm_pdidx_intel[4]; /* PDIEs for PAE mode U-K */

	struct mutex pm_mtx;
	struct mutex pm_apte_mtx;

	/*
	 * pm_pdir : VA of PD when executing in privileged mode
	 *	(locked by object lock)
	 * pm_pdirpa : PA of PD when executing in privileged mode
	 *	(read-only after create)
	 * pm_pdir_intel : VA of PD when executing on Intel CPU in
	 *	usermode (no kernel mappings)
	 * pm_pdirpa_intel : PA of PD when executing on Intel CPU in
	 *	usermode (no kernel mappings)
	 */
	paddr_t pm_pdirpa, pm_pdirpa_intel;
	vaddr_t pm_pdir, pm_pdir_intel;
	int pm_pdirsize; /* PD size (4k vs 16k on PAE) */
	struct uvm_object pm_obj; /* object (lck by object lock) */
	LIST_ENTRY(pmap) pm_list; /* list (lck by pm_list lock) */
	struct vm_page *pm_ptphint; /* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats; /* pmap stats (lck by object lock) */

	vaddr_t pm_hiexec; /* highest executable mapping */
	int pm_flags; /* see below */

	struct segment_descriptor pm_codeseg; /* cs descriptor for process */
};

/*
 * For each managed physical page we maintain a list of <PMAP,VA>s
 * which it is mapped at.  The list is headed by a pv_head structure.
 * there is one pv_head per managed phys page (allocated at boot time).
 * The pv_head structure points to a list of pv_entry structures (each
 * describes one mapping).
 */

struct pv_entry { /* locked by its list's pvh_lock */
	struct pv_entry *pv_next; /* next entry */
	struct pmap *pv_pmap; /* the pmap */
	vaddr_t pv_va; /* the virtual address */
	struct vm_page *pv_ptp; /* the vm_page of the PTP */
};
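
/*
 * Illustrative sketch (not code from pmap.c): every mapping of a
 * managed page can be found by walking its pv list under pv_mtx:
 *
 *	struct pv_entry *pve;
 *
 *	mtx_enter(&pg->mdpage.pv_mtx);
 *	for (pve = pg->mdpage.pv_list; pve != NULL; pve = pve->pv_next)
 *		operate on the mapping named by pve->pv_pmap and pve->pv_va;
 *	mtx_leave(&pg->mdpage.pv_mtx);
 */
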
/*
 * MD flags to pmap_enter:
 */

/* to get just the pa from params to pmap_enter */
#define PMAP_PA_MASK ~((paddr_t)PAGE_MASK)
#define PMAP_NOCACHE 0x1 /* map uncached */
#define PMAP_WC 0x2 /* map write combining. */

/*
 * We keep mod/ref flags in struct vm_page->pg_flags.
 */
#define PG_PMAP_MOD PG_PMAP0
#define PG_PMAP_REF PG_PMAP1
#define PG_PMAP_WC PG_PMAP2

/*
 * pv_entrys are dynamically allocated in chunks from a single page.
 * we keep track of how many pv_entrys are in use for each page and
 * we can free pv_entry pages if needed.  There is one lock for the
 * entire allocation system.
 */

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pvpi_list;
	struct pv_entry *pvpi_pvfree;
	int pvpi_nfree;
};

/*
 * number of pv_entries in a pv_page
 */

#define PVE_PER_PVPAGE ((PAGE_SIZE - sizeof(struct pv_page_info)) / \
			sizeof(struct pv_entry))

/*
 * a pv_page: where pv_entrys are allocated from
 */

struct pv_page {
	struct pv_page_info pvinfo;
	struct pv_entry pvents[PVE_PER_PVPAGE];
};
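
/*
 * Worked example (assuming 4KB pages and ILP32 i386 sizes): struct
 * pv_page_info is 16 bytes (two TAILQ pointers, one pointer, one int)
 * and struct pv_entry is 16 bytes (four 4-byte members), so
 * PVE_PER_PVPAGE = (4096 - 16) / 16 = 255 pv_entrys per pv_page.
 */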

/*
 * global kernel variables
 */

extern char PTD[];
extern struct pmap kernel_pmap_store; /* kernel pmap */
extern int nkptp_max;

#define PMAP_REMOVE_ALL 0
#define PMAP_REMOVE_SKIPWIRED 1

extern struct pool pmap_pv_pool;

/*
 * Macros
 */

#define pmap_kernel() (&kernel_pmap_store)
#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
#define pmap_update(pm) /* nada */

#define pmap_clear_modify(pg) pmap_clear_attrs(pg, PG_M)
#define pmap_clear_reference(pg) pmap_clear_attrs(pg, PG_U)
#define pmap_is_modified(pg) pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg) pmap_test_attrs(pg, PG_U)
#define pmap_valid_entry(E) ((E) & PG_V) /* is PDE or PTE valid? */
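
/*
 * Typical use of the attribute wrappers above (a sketch): a pager that
 * wants to know whether a page was dirtied since it last looked could do:
 *
 *	if (pmap_is_modified(pg)) {
 *		write the page back;
 *		pmap_clear_modify(pg);
 *	}
 */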

#define pmap_proc_iflush(p,va,len) /* nothing */
#define pmap_init_percpu() do { /* nothing */ } while (0)
#define pmap_unuse_final(p) /* nothing */
#define pmap_remove_holes(vm) do { /* nothing */ } while (0)

/*
 * Prototypes
 */

vaddr_t pmap_tmpmap_pa_86(paddr_t);
vaddr_t pmap_tmpmap_pa(paddr_t);
void pmap_tmpunmap_pa_86(void);
void pmap_tmpunmap_pa(void);

void pmap_bootstrap(vaddr_t);
void pmap_bootstrap_pae(void);
void pmap_virtual_space(vaddr_t *, vaddr_t *);
void pmap_init(void);
struct pmap *pmap_create(void);
void pmap_destroy(struct pmap *);
void pmap_reference(struct pmap *);
void pmap_remove(struct pmap *, vaddr_t, vaddr_t);
void pmap_collect(struct pmap *);
void pmap_activate(struct proc *);
void pmap_deactivate(struct proc *);
void pmap_kenter_pa(vaddr_t, paddr_t, vm_prot_t);
void pmap_kremove(vaddr_t, vsize_t);
void pmap_zero_page(struct vm_page *);
void pmap_copy_page(struct vm_page *, struct vm_page *);
void pmap_enter_pv(struct vm_page *, struct pv_entry *,
    struct pmap *, vaddr_t, struct vm_page *);
int pmap_clear_attrs(struct vm_page *, int);
static void pmap_page_protect(struct vm_page *, vm_prot_t);
void pmap_page_remove(struct vm_page *);
static void pmap_protect(struct pmap *, vaddr_t,
    vaddr_t, vm_prot_t);
int pmap_test_attrs(struct vm_page *, int);
void pmap_write_protect(struct pmap *, vaddr_t,
    vaddr_t, vm_prot_t);
int pmap_exec_fixup(struct vm_map *, struct trapframe *,
    vaddr_t, struct pcb *);
void pmap_exec_account(struct pmap *, vaddr_t, u_int32_t,
    u_int32_t);
struct pv_entry *pmap_remove_pv(struct vm_page *, struct pmap *, vaddr_t);
void pmap_apte_flush(void);
void pmap_switch(struct proc *, struct proc *);
vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */
paddr_t vtophys(vaddr_t va);
paddr_t vtophys_pae(vaddr_t va);

extern u_int32_t (*pmap_pte_set_p)(vaddr_t, paddr_t, u_int32_t);
extern u_int32_t (*pmap_pte_setbits_p)(vaddr_t, u_int32_t, u_int32_t);
extern u_int32_t (*pmap_pte_bits_p)(vaddr_t);
extern paddr_t (*pmap_pte_paddr_p)(vaddr_t);
extern int (*pmap_clear_attrs_p)(struct vm_page *, int);
extern int (*pmap_enter_p)(pmap_t, vaddr_t, paddr_t, vm_prot_t, int);
extern void (*pmap_enter_special_p)(vaddr_t, paddr_t, vm_prot_t, u_int32_t);
extern int (*pmap_extract_p)(pmap_t, vaddr_t, paddr_t *);
extern vaddr_t (*pmap_growkernel_p)(vaddr_t);
extern void (*pmap_page_remove_p)(struct vm_page *);
extern void (*pmap_do_remove_p)(struct pmap *, vaddr_t, vaddr_t, int);
extern int (*pmap_test_attrs_p)(struct vm_page *, int);
extern void (*pmap_unwire_p)(struct pmap *, vaddr_t);
extern void (*pmap_write_protect_p)(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
extern void (*pmap_pinit_pd_p)(pmap_t);
extern void (*pmap_zero_phys_p)(paddr_t);
extern void (*pmap_copy_page_p)(struct vm_page *, struct vm_page *);

u_int32_t pmap_pte_set_pae(vaddr_t, paddr_t, u_int32_t);
u_int32_t pmap_pte_setbits_pae(vaddr_t, u_int32_t, u_int32_t);
u_int32_t pmap_pte_bits_pae(vaddr_t);
paddr_t pmap_pte_paddr_pae(vaddr_t);
int pmap_clear_attrs_pae(struct vm_page *, int);
int pmap_enter_pae(pmap_t, vaddr_t, paddr_t, vm_prot_t, int);
void pmap_enter_special_pae(vaddr_t, paddr_t, vm_prot_t, u_int32_t);
int pmap_extract_pae(pmap_t, vaddr_t, paddr_t *);
vaddr_t pmap_growkernel_pae(vaddr_t);
void pmap_page_remove_pae(struct vm_page *);
void pmap_do_remove_pae(struct pmap *, vaddr_t, vaddr_t, int);
int pmap_test_attrs_pae(struct vm_page *, int);
void pmap_unwire_pae(struct pmap *, vaddr_t);
void pmap_write_protect_pae(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void pmap_pinit_pd_pae(pmap_t);
void pmap_zero_phys_pae(paddr_t);
void pmap_copy_page_pae(struct vm_page *, struct vm_page *);

#define pmap_pte_set (*pmap_pte_set_p)
#define pmap_pte_setbits (*pmap_pte_setbits_p)
#define pmap_pte_bits (*pmap_pte_bits_p)
#define pmap_pte_paddr (*pmap_pte_paddr_p)
#define pmap_clear_attrs (*pmap_clear_attrs_p)
#define pmap_page_remove (*pmap_page_remove_p)
#define pmap_do_remove (*pmap_do_remove_p)
#define pmap_test_attrs (*pmap_test_attrs_p)
#define pmap_unwire (*pmap_unwire_p)
#define pmap_write_protect (*pmap_write_protect_p)
#define pmap_pinit_pd (*pmap_pinit_pd_p)
#define pmap_zero_phys (*pmap_zero_phys_p)
#define pmap_copy_page (*pmap_copy_page_p)
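
/*
 * A sketch of how this dispatch is expected to be wired up (the
 * authoritative switch lives in pmap.c; see pmap_bootstrap_pae()):
 * the pointers refer to the pmap_*_86 variants until the kernel
 * switches to PAE, at which point they are repointed, e.g.:
 *
 *	pmap_pte_set_p = pmap_pte_set_pae;
 *	pmap_enter_p = pmap_enter_pae;
 */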

u_int32_t pmap_pte_set_86(vaddr_t, paddr_t, u_int32_t);
u_int32_t pmap_pte_setbits_86(vaddr_t, u_int32_t, u_int32_t);
u_int32_t pmap_pte_bits_86(vaddr_t);
paddr_t pmap_pte_paddr_86(vaddr_t);
int pmap_clear_attrs_86(struct vm_page *, int);
int pmap_enter_86(pmap_t, vaddr_t, paddr_t, vm_prot_t, int);
void pmap_enter_special_86(vaddr_t, paddr_t, vm_prot_t, u_int32_t);
int pmap_extract_86(pmap_t, vaddr_t, paddr_t *);
vaddr_t pmap_growkernel_86(vaddr_t);
void pmap_page_remove_86(struct vm_page *);
void pmap_do_remove_86(struct pmap *, vaddr_t, vaddr_t, int);
int pmap_test_attrs_86(struct vm_page *, int);
void pmap_unwire_86(struct pmap *, vaddr_t);
void pmap_write_protect_86(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void pmap_pinit_pd_86(pmap_t);
void pmap_zero_phys_86(paddr_t);
void pmap_copy_page_86(struct vm_page *, struct vm_page *);
void pmap_tlb_shootpage(struct pmap *, vaddr_t);
void pmap_tlb_shootrange(struct pmap *, vaddr_t, vaddr_t);
void pmap_tlb_shoottlb(void);
#ifdef MULTIPROCESSOR
void pmap_tlb_droppmap(struct pmap *);
void pmap_tlb_shootwait(void);
#else
#define pmap_tlb_shootwait()
#endif

void pmap_prealloc_lowmem_ptp(void);
void pmap_prealloc_lowmem_ptp_pae(void);
vaddr_t pmap_tmpmap_pa_pae(paddr_t);
void pmap_tmpunmap_pa_pae(void);

/*
 * functions for flushing the cache for vaddrs and pages.
 * these functions are not part of the MI pmap interface and thus
 * should not be used as such.
 */
void pmap_flush_cache(vaddr_t, vsize_t);
void pmap_flush_page(paddr_t);
void pmap_flush_page_pae(paddr_t);

#define PMAP_CHECK_COPYIN 1

#define PMAP_GROWKERNEL /* turn on pmap_growkernel interface */

/*
 * Inline functions
 */

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 * if hardware doesn't support one-page flushing)
 */

#define pmap_update_pg(va) invlpg((u_int)(va))

/*
 * pmap_update_2pg: flush two pages from the TLB
 */

#define pmap_update_2pg(va, vb) { invlpg((u_int)(va)); invlpg((u_int)(vb)); }
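
/*
 * Example (a sketch): after rewriting a PTE in place, the stale TLB
 * entry must be flushed before the new mapping can be relied upon:
 *
 *	opte = pmap_pte_set(va, pa, npte);
 *	if (opte & PG_V)
 *		pmap_update_pg(va);
 */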

/*
 * pmap_page_protect: change the protection of all recorded mappings
 * of a managed page
 *
 * => This function is a front end for pmap_page_remove/pmap_clear_attrs
 * => We only have to worry about making the page more protected.
 *    Unprotecting a page is done on-demand at fault time.
 */

static __inline void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & PROT_WRITE) == 0) {
		if (prot & (PROT_READ | PROT_EXEC)) {
			(void) pmap_clear_attrs(pg, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => This function is a front end for pmap_remove/pmap_write_protect.
 * => We only have to worry about making the page more protected.
 *    Unprotecting a page is done on-demand at fault time.
 */

static __inline void
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & PROT_WRITE) == 0) {
		if (prot & (PROT_READ | PROT_EXEC)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}

/*
 * pmap_growkernel, pmap_enter, and pmap_extract get picked up in various
 * modules from both uvm_pmap.h and pmap.h.  Since uvm_pmap.h defines these
 * as functions, inline them here to suppress linker warnings.
 */
static __inline vaddr_t
pmap_growkernel(vaddr_t maxkvaddr)
{
	return (*pmap_growkernel_p)(maxkvaddr);
}

static __inline int
pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
{
	return (*pmap_enter_p)(pmap, va, pa, prot, flags);
}

static __inline void
pmap_enter_special(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int32_t flags)
{
	(*pmap_enter_special_p)(va, pa, prot, flags);
}

static __inline int
pmap_extract(struct pmap *pmap, vaddr_t va, paddr_t *pa)
{
	return (*pmap_extract_p)(pmap, va, pa);
}

/*
 * p m a p   i n l i n e   h e l p e r   f u n c t i o n s
 */

/*
 * pmap_is_active: is this pmap loaded into the specified processor's %cr3?
 */

static __inline int
pmap_is_active(struct pmap *pmap, struct cpu_info *ci)
{
	return (pmap == pmap_kernel() || ci->ci_curpmap == pmap);
}

static __inline int
pmap_is_curpmap(struct pmap *pmap)
{
	return (pmap_is_active(pmap, curcpu()));
}
#endif /* _KERNEL */

struct pv_entry;
struct vm_page_md {
	struct mutex pv_mtx;
	struct pv_entry *pv_list;
};

#define VM_MDPAGE_INIT(pg) do { \
	mtx_init(&(pg)->mdpage.pv_mtx, IPL_VM); \
	(pg)->mdpage.pv_list = NULL; \
} while (0)

#endif /* _MACHINE_PMAP_H_ */