xref: /openbsd/sys/arch/i386/include/pmap.h (revision 4fb9ab68)
1 /*	$OpenBSD: pmap.h,v 1.95 2024/06/18 12:37:29 jsg Exp $	*/
2 /*	$NetBSD: pmap.h,v 1.44 2000/04/24 17:18:18 thorpej Exp $	*/
3 
4 /*
5  * Copyright (c) 1997 Charles D. Cranor and Washington University.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 /*
30  * pmap.h: see pmap.c for the history of this pmap module.
31  */
32 
33 #ifndef	_MACHINE_PMAP_H_
34 #define	_MACHINE_PMAP_H_
35 
36 #ifdef _KERNEL
37 #include <machine/cpufunc.h>
38 #include <machine/segments.h>
39 #endif
40 #include <sys/mutex.h>
41 #include <uvm/uvm_object.h>
42 #include <machine/pte.h>
43 
44 #define	PDSLOT_PTE	((KERNBASE/NBPD)-2) /* 830: for recursive PDP map */
45 #define	PDSLOT_KERN	(KERNBASE/NBPD) /* 832: start of kernel space */
46 #define	PDSLOT_APTE	((unsigned)1022) /* 1022: alternative recursive slot */
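
/*
 * Illustrative sketch, not taken from the source (numbers assume the
 * usual i386 layout where KERNBASE is 0xd0000000 and a non-PAE PDE maps
 * NBPD == 4MB): installing the page directory into itself at PDSLOT_PTE
 * is what makes the "recursive" map work, since every PTE then shows up
 * in one linear window of virtual address space, e.g.
 *
 *	PDSLOT_KERN = 0xd0000000 / 0x00400000 = 832
 *	PDSLOT_PTE  = 832 - 2                 = 830
 *	PTE window  = 830 * 0x00400000        = 0xcf800000
 *
 * so the PTE for a virtual address va lives (non-PAE) at roughly
 * 0xcf800000 + (va >> PAGE_SHIFT) * sizeof(pt_entry_t).
 */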
47 
48 /*
49  * The following define determines how many PTPs should be set up for the
50  * kernel by locore.s at boot time.  This should be large enough to
51  * get the VM system running.  Once the VM system is running, the
52  * pmap module can add more PTPs to the kernel area on demand.
53  */
54 
55 #ifndef	NKPTP
56 #define	NKPTP		8	/* 16/32MB to start */
57 #endif
58 #define	NKPTP_MIN	4	/* smallest value we allow */
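
/*
 * Rough illustration (assumes each kernel PTP maps NBPD bytes of VA,
 * i.e. 4MB without PAE and 2MB with PAE): the default NKPTP of 8 gives
 * locore.s 8 * 4MB = 32MB (non-PAE) or 8 * 2MB = 16MB (PAE) of mapped
 * kernel virtual space, which is the "16/32MB to start" noted above.
 */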
59 
60 /*
61  * PG_AVAIL usage: we make use of the ignored bits of the PTE
62  */
63 
64 #define PG_W		PG_AVAIL1	/* "wired" mapping */
65 #define PG_PVLIST	PG_AVAIL2	/* mapping has entry on pvlist */
66 #define	PG_X		PG_AVAIL3	/* executable mapping */
67 
68 #define PTP0_PA             (PAGE_SIZE * 3)
69 
70 #ifdef _KERNEL
71 /*
72  * pmap data structures: see pmap.c for details of locking.
73  */
74 
75 struct pmap;
76 typedef struct pmap *pmap_t;
77 
78 /*
79  * We maintain a list of all non-kernel pmaps.
80  */
81 
82 LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */
83 
84 /*
85  * The pmap structure
86  *
87  * Note that the pm_obj contains the reference count,
88  * page list, and number of PTPs within the pmap.
89  */
90 
91 struct pmap {
92 	uint64_t pm_pdidx[4];		/* PDIEs for PAE mode */
93 	uint64_t pm_pdidx_intel[4];	/* PDIEs for PAE mode U-K */
94 
95 	struct mutex pm_mtx;
96 	struct mutex pm_apte_mtx;
97 
98 	/*
99 	 * pm_pdir		: VA of PD when executing in privileged mode
100 	 *			  (locked by object lock)
101 	 * pm_pdirpa		: PA of PD when executing in privileged mode,
102 	 *			  (read-only after create)
103 	 * pm_pdir_intel	: VA of PD when executing on Intel CPU in
104 	 *			  usermode (no kernel mappings)
105 	 * pm_pdirpa_intel	: PA of PD when executing on Intel CPU in
106 	 *			  usermode (no kernel mappings)
107 	 */
108 	paddr_t pm_pdirpa, pm_pdirpa_intel;
109 	vaddr_t pm_pdir, pm_pdir_intel;
110 	int	pm_pdirsize;		/* PD size (4k vs 16k on PAE) */
111 	struct uvm_object pm_obj;	/* object (lck by object lock) */
112 	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
113 	struct vm_page *pm_ptphint;	/* pointer to a PTP in our pmap */
114 	struct pmap_statistics pm_stats;  /* pmap stats (lck by object lock) */
115 
116 	vaddr_t pm_hiexec;		/* highest executable mapping */
117 	int pm_flags;			/* see below */
118 
119 	struct segment_descriptor pm_codeseg;	/* cs descriptor for process */
120 };
121 
122 /*
123  * For each managed physical page we maintain a list of the <PMAP,VA>
124  * pairs at which it is mapped.  The list is headed by a pv_head structure.
125  * There is one pv_head per managed phys page (allocated at boot time).
126  * The pv_head structure points to a list of pv_entry structures (each
127  * describes one mapping).
128  */
129 
130 struct pv_entry {			/* locked by the owning page's pv_mtx */
131 	struct pv_entry *pv_next;	/* next entry */
132 	struct pmap *pv_pmap;		/* the pmap */
133 	vaddr_t pv_va;			/* the virtual address */
134 	struct vm_page *pv_ptp;		/* the vm_page of the PTP */
135 };
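
/*
 * Illustrative sketch only, not part of the pmap interface: code that
 * wants to visit every mapping of a managed page walks this list under
 * the page's pv_mtx (see struct vm_page_md at the end of this file),
 * roughly:
 *
 *	struct pv_entry *pve;
 *
 *	mtx_enter(&pg->mdpage.pv_mtx);
 *	for (pve = pg->mdpage.pv_list; pve != NULL; pve = pve->pv_next)
 *		... inspect pve->pv_pmap and pve->pv_va ...
 *	mtx_leave(&pg->mdpage.pv_mtx);
 */
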
136 /*
137  * MD flags to pmap_enter:
138  */
139 
140 /* to get just the pa from params to pmap_enter */
141 #define PMAP_PA_MASK	~((paddr_t)PAGE_MASK)
142 #define	PMAP_NOCACHE	0x1		/* map uncached */
143 #define	PMAP_WC		0x2		/* map write combining. */
144 
145 /*
146  * We keep mod/ref flags in struct vm_page->pg_flags.
147  */
148 #define	PG_PMAP_MOD	PG_PMAP0
149 #define	PG_PMAP_REF	PG_PMAP1
150 #define	PG_PMAP_WC	PG_PMAP2
151 
152 /*
153  * pv_entrys are dynamically allocated in chunks from a single page.
154  * we keep track of how many pv_entrys are in use for each page and
155  * we can free pv_entry pages if needed.  There is one lock for the
156  * entire allocation system.
157  */
158 
159 struct pv_page_info {
160 	TAILQ_ENTRY(pv_page) pvpi_list;
161 	struct pv_entry *pvpi_pvfree;
162 	int pvpi_nfree;
163 };
164 
165 /*
166  * number of pv_entries in a pv_page
167  */
168 
169 #define PVE_PER_PVPAGE ((PAGE_SIZE - sizeof(struct pv_page_info)) / \
170 			sizeof(struct pv_entry))
171 
172 /*
173  * a pv_page: where pv_entrys are allocated from
174  */
175 
176 struct pv_page {
177 	struct pv_page_info pvinfo;
178 	struct pv_entry pvents[PVE_PER_PVPAGE];
179 };
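
/*
 * Rough size illustration (the sizes are assumptions for ILP32 i386,
 * not taken from the source): with PAGE_SIZE == 4096 and both
 * sizeof(struct pv_page_info) and sizeof(struct pv_entry) equal to 16,
 * PVE_PER_PVPAGE works out to (4096 - 16) / 16 == 255 pv_entrys carved
 * out of each pv_page.
 */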
180 
181 /*
182  * global kernel variables
183  */
187 
188 extern char PTD[];
189 extern struct pmap kernel_pmap_store; /* kernel pmap */
190 extern int nkptp_max;
191 
192 #define PMAP_REMOVE_ALL 0
193 #define PMAP_REMOVE_SKIPWIRED 1
194 
195 extern struct pool pmap_pv_pool;
196 
197 /*
198  * Macros
199  */
200 
201 #define	pmap_kernel()			(&kernel_pmap_store)
202 #define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
203 #define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
204 #define	pmap_update(pm)			/* nada */
205 
206 #define pmap_clear_modify(pg)		pmap_clear_attrs(pg, PG_M)
207 #define pmap_clear_reference(pg)	pmap_clear_attrs(pg, PG_U)
208 #define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
209 #define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
210 #define pmap_valid_entry(E) 		((E) & PG_V) /* is PDE or PTE valid? */
211 
212 #define pmap_proc_iflush(p,va,len)	/* nothing */
213 #define pmap_init_percpu()		do { /* nothing */ } while (0)
214 #define pmap_unuse_final(p)		/* nothing */
215 #define	pmap_remove_holes(vm)		do { /* nothing */ } while (0)
216 
217 /*
218  * Prototypes
219  */
220 
221 vaddr_t pmap_tmpmap_pa_86(paddr_t);
222 vaddr_t pmap_tmpmap_pa(paddr_t);
223 void pmap_tmpunmap_pa_86(void);
224 void pmap_tmpunmap_pa(void);
225 
226 void pmap_bootstrap(vaddr_t);
227 void pmap_bootstrap_pae(void);
228 void pmap_virtual_space(vaddr_t *, vaddr_t *);
229 void pmap_init(void);
230 struct pmap *pmap_create(void);
231 void pmap_destroy(struct pmap *);
232 void pmap_reference(struct pmap *);
233 void pmap_remove(struct pmap *, vaddr_t, vaddr_t);
234 void pmap_collect(struct pmap *);
235 void pmap_activate(struct proc *);
236 void pmap_deactivate(struct proc *);
237 void pmap_kenter_pa(vaddr_t, paddr_t, vm_prot_t);
238 void pmap_kremove(vaddr_t, vsize_t);
239 void pmap_zero_page(struct vm_page *);
240 void pmap_copy_page(struct vm_page *, struct vm_page *);
241 void pmap_enter_pv(struct vm_page *, struct pv_entry *,
242     struct pmap *, vaddr_t, struct vm_page *);
243 int pmap_clear_attrs(struct vm_page *, int);
244 static void pmap_page_protect(struct vm_page *, vm_prot_t);
245 void pmap_page_remove(struct vm_page *);
246 static void pmap_protect(struct pmap *, vaddr_t,
247     vaddr_t, vm_prot_t);
249 int pmap_test_attrs(struct vm_page *, int);
250 void pmap_write_protect(struct pmap *, vaddr_t,
251     vaddr_t, vm_prot_t);
252 int pmap_exec_fixup(struct vm_map *, struct trapframe *,
253     vaddr_t, struct pcb *);
254 void pmap_exec_account(struct pmap *, vaddr_t, u_int32_t,
255     u_int32_t);
256 struct pv_entry *pmap_remove_pv(struct vm_page *, struct pmap *, vaddr_t);
257 void pmap_apte_flush(void);
258 void pmap_switch(struct proc *, struct proc *);
259 vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */
260 paddr_t vtophys(vaddr_t va);
261 paddr_t vtophys_pae(vaddr_t va);
262 
263 extern u_int32_t (*pmap_pte_set_p)(vaddr_t, paddr_t, u_int32_t);
264 extern u_int32_t (*pmap_pte_setbits_p)(vaddr_t, u_int32_t, u_int32_t);
265 extern u_int32_t (*pmap_pte_bits_p)(vaddr_t);
266 extern paddr_t (*pmap_pte_paddr_p)(vaddr_t);
267 extern int (*pmap_clear_attrs_p)(struct vm_page *, int);
268 extern int (*pmap_enter_p)(pmap_t, vaddr_t, paddr_t, vm_prot_t, int);
269 extern void (*pmap_enter_special_p)(vaddr_t, paddr_t, vm_prot_t, u_int32_t);
270 extern int (*pmap_extract_p)(pmap_t, vaddr_t, paddr_t *);
271 extern vaddr_t (*pmap_growkernel_p)(vaddr_t);
272 extern void (*pmap_page_remove_p)(struct vm_page *);
273 extern void (*pmap_do_remove_p)(struct pmap *, vaddr_t, vaddr_t, int);
274 extern int (*pmap_test_attrs_p)(struct vm_page *, int);
275 extern void (*pmap_unwire_p)(struct pmap *, vaddr_t);
276 extern void (*pmap_write_protect_p)(struct pmap*, vaddr_t, vaddr_t, vm_prot_t);
277 extern void (*pmap_pinit_pd_p)(pmap_t);
278 extern void (*pmap_zero_phys_p)(paddr_t);
279 extern int (*pmap_zero_page_uncached_p)(paddr_t);
280 extern void (*pmap_copy_page_p)(struct vm_page *, struct vm_page *);
281 
282 u_int32_t pmap_pte_set_pae(vaddr_t, paddr_t, u_int32_t);
283 u_int32_t pmap_pte_setbits_pae(vaddr_t, u_int32_t, u_int32_t);
284 u_int32_t pmap_pte_bits_pae(vaddr_t);
285 paddr_t pmap_pte_paddr_pae(vaddr_t);
286 int pmap_clear_attrs_pae(struct vm_page *, int);
287 int pmap_enter_pae(pmap_t, vaddr_t, paddr_t, vm_prot_t, int);
288 void pmap_enter_special_pae(vaddr_t, paddr_t, vm_prot_t, u_int32_t);
289 int pmap_extract_pae(pmap_t, vaddr_t, paddr_t *);
290 vaddr_t pmap_growkernel_pae(vaddr_t);
291 void pmap_page_remove_pae(struct vm_page *);
292 void pmap_do_remove_pae(struct pmap *, vaddr_t, vaddr_t, int);
293 int pmap_test_attrs_pae(struct vm_page *, int);
294 void pmap_unwire_pae(struct pmap *, vaddr_t);
295 void pmap_write_protect_pae(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
296 void pmap_pinit_pd_pae(pmap_t);
297 void pmap_zero_phys_pae(paddr_t);
298 int pmap_zero_page_uncached_pae(paddr_t);
299 void pmap_copy_page_pae(struct vm_page *, struct vm_page *);
300 
301 #define	pmap_pte_set		(*pmap_pte_set_p)
302 #define	pmap_pte_setbits	(*pmap_pte_setbits_p)
303 #define	pmap_pte_bits		(*pmap_pte_bits_p)
304 #define	pmap_pte_paddr		(*pmap_pte_paddr_p)
305 #define	pmap_clear_attrs	(*pmap_clear_attrs_p)
306 #define	pmap_page_remove	(*pmap_page_remove_p)
307 #define	pmap_do_remove		(*pmap_do_remove_p)
308 #define	pmap_test_attrs		(*pmap_test_attrs_p)
309 #define	pmap_unwire		(*pmap_unwire_p)
310 #define	pmap_write_protect	(*pmap_write_protect_p)
311 #define	pmap_pinit_pd		(*pmap_pinit_pd_p)
312 #define	pmap_zero_phys		(*pmap_zero_phys_p)
313 #define	pmap_zero_page_uncached	(*pmap_zero_page_uncached_p)
314 #define	pmap_copy_page		(*pmap_copy_page_p)
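
/*
 * Usage sketch (an assumption about how the indirection is meant to be
 * used; see pmap.c and the PAE code for the authoritative setup): the
 * function pointers behind these macros are pointed at either the *_86
 * or the *_pae implementations when the pmap is bootstrapped, so callers
 * simply write, e.g.
 *
 *	u_int32_t bits;
 *
 *	bits = pmap_pte_bits(va);
 *
 * and end up in pmap_pte_bits_86() or pmap_pte_bits_pae() as appropriate.
 */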
315 
316 u_int32_t pmap_pte_set_86(vaddr_t, paddr_t, u_int32_t);
317 u_int32_t pmap_pte_setbits_86(vaddr_t, u_int32_t, u_int32_t);
318 u_int32_t pmap_pte_bits_86(vaddr_t);
319 paddr_t pmap_pte_paddr_86(vaddr_t);
320 int pmap_clear_attrs_86(struct vm_page *, int);
321 int pmap_enter_86(pmap_t, vaddr_t, paddr_t, vm_prot_t, int);
322 void pmap_enter_special_86(vaddr_t, paddr_t, vm_prot_t, u_int32_t);
323 int pmap_extract_86(pmap_t, vaddr_t, paddr_t *);
324 vaddr_t pmap_growkernel_86(vaddr_t);
325 void pmap_page_remove_86(struct vm_page *);
326 void pmap_do_remove_86(struct pmap *, vaddr_t, vaddr_t, int);
327 int pmap_test_attrs_86(struct vm_page *, int);
328 void pmap_unwire_86(struct pmap *, vaddr_t);
329 void pmap_write_protect_86(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
330 void pmap_pinit_pd_86(pmap_t);
331 void pmap_zero_phys_86(paddr_t);
332 int pmap_zero_page_uncached_86(paddr_t);
333 void pmap_copy_page_86(struct vm_page *, struct vm_page *);
334 void pmap_tlb_shootpage(struct pmap *, vaddr_t);
335 void pmap_tlb_shootrange(struct pmap *, vaddr_t, vaddr_t);
336 void pmap_tlb_shoottlb(void);
337 #ifdef MULTIPROCESSOR
338 void pmap_tlb_droppmap(struct pmap *);
339 void pmap_tlb_shootwait(void);
340 #else
341 #define pmap_tlb_shootwait()
342 #endif
343 
344 void pmap_prealloc_lowmem_ptp(void);
345 void pmap_prealloc_lowmem_ptp_pae(void);
348 vaddr_t pmap_tmpmap_pa_pae(paddr_t);
349 void pmap_tmpunmap_pa_pae(void);
350 
351 
352 /*
353  * functions for flushing the cache for vaddrs and pages.
354  * these functions are not part of the MI pmap interface and thus
355  * should not be used as such.
356  */
357 void pmap_flush_cache(vaddr_t, vsize_t);
358 void pmap_flush_page(paddr_t);
359 void pmap_flush_page_pae(paddr_t);
360 
361 #define PMAP_CHECK_COPYIN	1
362 
363 #define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */
364 
365 /*
366  * Do idle page zero'ing uncached to avoid polluting the cache.
367  */
368 #define	PMAP_PAGEIDLEZERO(pg)	pmap_zero_page_uncached(VM_PAGE_TO_PHYS(pg))
369 
370 /*
371  * Inline functions
372  */
373 
374 /*
375  * pmap_update_pg: flush one page from the TLB (or flush the whole thing
376  *	if hardware doesn't support one-page flushing)
377  */
378 
379 #define pmap_update_pg(va)	invlpg((u_int)(va))
380 
381 /*
382  * pmap_update_2pg: flush two pages from the TLB
383  */
384 
385 #define pmap_update_2pg(va, vb) { invlpg((u_int)(va)); invlpg((u_int)(vb)); }
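
/*
 * Usage note (illustrative only; the (set, clear) argument order below
 * is an assumption, not confirmed by this header): after a live PTE is
 * edited, the stale translation must be flushed, roughly
 *
 *	pmap_pte_setbits(va, PG_N, 0);
 *	pmap_update_pg(va);
 *
 * pmap_update_2pg() is the two-address form, convenient when a pair of
 * temporary mappings (e.g. a copy source and destination) changes at once.
 */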
386 
387 /*
388  * pmap_page_protect: change the protection of all recorded mappings
389  *	of a managed page
390  *
391  * => This function is a front end for pmap_page_remove/pmap_clear_attrs
392  * => We only have to worry about making the page more protected.
393  *	Unprotecting a page is done on-demand at fault time.
394  */
395 
396 static __inline void
397 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
398 {
399 	if ((prot & PROT_WRITE) == 0) {
400 		if (prot & (PROT_READ | PROT_EXEC)) {
401 			(void) pmap_clear_attrs(pg, PG_RW);
402 		} else {
403 			pmap_page_remove(pg);
404 		}
405 	}
406 }
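
/*
 * For example (this follows directly from the code above):
 * pmap_page_protect(pg, PROT_READ) downgrades every mapping of pg to
 * read-only via pmap_clear_attrs(pg, PG_RW), while
 * pmap_page_protect(pg, PROT_NONE) removes all mappings of pg via
 * pmap_page_remove(pg).
 */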
407 
408 /*
409  * pmap_protect: change the protection of pages in a pmap
410  *
411  * => This function is a front end for pmap_remove/pmap_write_protect.
412  * => We only have to worry about making the page more protected.
413  *	Unprotecting a page is done on-demand at fault time.
414  */
415 
416 static __inline void
417 pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
418 {
419 	if ((prot & PROT_WRITE) == 0) {
420 		if (prot & (PROT_READ | PROT_EXEC)) {
421 			pmap_write_protect(pmap, sva, eva, prot);
422 		} else {
423 			pmap_remove(pmap, sva, eva);
424 		}
425 	}
426 }
427 
428 /*
429  * pmap_growkernel, pmap_enter, and pmap_extract get picked up in various
430  * modules from both uvm_pmap.h and pmap.h. Since uvm_pmap.h defines these
431  * as functions, inline them here to suppress linker warnings.
432  */
433 static __inline vaddr_t
434 pmap_growkernel(vaddr_t maxkvaddr)
435 {
436 	return (*pmap_growkernel_p)(maxkvaddr);
437 }
438 
439 static __inline int
440 pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
441 {
442 	return (*pmap_enter_p)(pmap, va, pa, prot, flags);
443 }
444 
445 static __inline void
446 pmap_enter_special(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int32_t flags)
447 {
448 	(*pmap_enter_special_p)(va, pa, prot, flags);
449 }
450 
451 static __inline int
452 pmap_extract(struct pmap *pmap, vaddr_t va, paddr_t *pa)
453 {
454 	return (*pmap_extract_p)(pmap, va, pa);
455 }
456 
457 /*
458  * p m a p   i n l i n e   h e l p e r   f u n c t i o n s
459  */
460 
461 /*
462  * pmap_is_active: is this pmap loaded into the specified processor's %cr3?
463  */
464 
465 static __inline int
466 pmap_is_active(struct pmap *pmap, struct cpu_info *ci)
467 {
468 	return (pmap == pmap_kernel() || ci->ci_curpmap == pmap);
469 }
470 
471 static __inline int
472 pmap_is_curpmap(struct pmap *pmap)
473 {
474 	return (pmap_is_active(pmap, curcpu()));
475 }
476 
477 #endif /* _KERNEL */
478 
479 struct pv_entry;
480 struct vm_page_md {
481 	struct mutex pv_mtx;
482 	struct pv_entry *pv_list;
483 };
484 
485 #define VM_MDPAGE_INIT(pg) do {			\
486 	mtx_init(&(pg)->mdpage.pv_mtx, IPL_VM); \
487 	(pg)->mdpage.pv_list = NULL;	\
488 } while (0)
489 
490 #endif	/* _MACHINE_PMAP_H_ */
491