/*	$OpenBSD: pmap.h,v 1.57 2024/11/07 08:12:12 miod Exp $	*/
/*	$NetBSD: pmap.h,v 1.76 2003/09/06 09:10:46 rearnsha Exp $	*/

/*
 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994,1995 Mark Brinicombe.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_ARM_PMAP_H_
#define	_ARM_PMAP_H_

#ifdef _KERNEL

#include <arm/cpuconf.h>
#include <arm/pte.h>
#ifndef _LOCORE
#include <arm/cpufunc.h>
#endif

/*
 * A pmap describes a process's 4GB virtual address space.  This
 * virtual address space can be broken up into 4096 1MB regions which
 * are described by L1 PTEs in the L1 table.
 *
 * There is a line drawn at KERNEL_BASE.  Everything below that line
 * changes when the VM context is switched.  Everything above that line
 * is the same no matter which VM context is running.  This is achieved
 * by making the L1 PTEs for those slots above KERNEL_BASE reference
 * kernel L2 tables.
 *
 * The basic layout of the virtual address space thus looks like this:
 *
 *	0xffffffff
 *	.
 *	.
 *	.
 *	KERNEL_BASE
 *	--------------------
 *	.
 *	.
 *	.
 *	0x00000000
 */
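
/*
 * Worked example (illustration only): each of the 4096 L1 slots maps a
 * 1MB region, so the L1 slot covering a virtual address is simply
 * (va >> L1_S_SHIFT), and 4096 * 1MB covers the full 4GB address space
 * described above.
 */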

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable. Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)
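
/*
 * Worked example (illustration only): each L2 table in a bucket maps 1MB
 * (256 small pages of 4KB), so L2_BUCKET_SIZE == 16 tables give
 * 16 * 1MB == 16MB of contiguous virtual address space per l2_dtable.
 */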

/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
#define	L2_SIZE		(1 << L2_LOG2)
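
/*
 * Worked example (illustration only): with L1_S_SHIFT == 20 and
 * L2_BUCKET_LOG2 == 4, L2_LOG2 == (32 - 20) - 4 == 8, so L2_SIZE == 256
 * l2_dtable slots, each covering 16MB, again spanning the full 4GB.
 */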

#ifndef _LOCORE

struct l1_ttable;
struct l2_dtable;

/*
 * Track cache/tlb occupancy using the following structure
 */
union pmap_cache_state {
	struct {
		union {
			u_int8_t csu_cache_b[2];
			u_int16_t csu_cache;
		} cs_cache_u;

		union {
			u_int8_t csu_tlb_b[2];
			u_int16_t csu_tlb;
		} cs_tlb_u;
	} cs_s;
	u_int32_t cs_all;
};
#define	cs_cache_id	cs_s.cs_cache_u.csu_cache_b[0]
#define	cs_cache_d	cs_s.cs_cache_u.csu_cache_b[1]
#define	cs_cache	cs_s.cs_cache_u.csu_cache
#define	cs_tlb_id	cs_s.cs_tlb_u.csu_tlb_b[0]
#define	cs_tlb_d	cs_s.cs_tlb_u.csu_tlb_b[1]
#define	cs_tlb		cs_s.cs_tlb_u.csu_tlb

/*
 * Assigned to cs_all to force cacheops to work for a particular pmap
 */
#define	PMAP_CACHE_STATE_ALL	0xffffffffu
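
/*
 * For example, assigning PMAP_CACHE_STATE_ALL to pm_cstate.cs_all marks
 * every cache and TLB byte above as occupied, so no cache or TLB
 * operation will ever be skipped for that pmap.
 */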

/*
 * The pmap structure itself
 */
struct pmap {
	u_int8_t		pm_domain;	/* MMU domain used by this pmap */
	int			pm_remove_all;	/* nonzero while all mappings are being torn down */
	struct l1_ttable	*pm_l1;		/* L1 translation table */
	union pmap_cache_state	pm_cstate;	/* cache/TLB occupancy (see above) */
	u_int			pm_refs;	/* reference count */
	struct l2_dtable	*pm_l2[L2_SIZE]; /* l2_dtable pointers, 16MB each */
	struct pmap_statistics	pm_stats;	/* resident/wired page counts */
};

typedef struct pmap *pmap_t;

/*
 * MD flags that we use for pmap_enter (in the pa):
 */
#define PMAP_PA_MASK	~((paddr_t)PAGE_MASK) /* to remove the flags */
#define PMAP_NOCACHE	0x1 /* non-cacheable memory. */
#define PMAP_DEVICE	0x2 /* device memory. */
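
/*
 * For example, a device register page would be entered with the flag
 * or'ed into the low bits of the physical address, roughly:
 *
 *	pmap_enter(pmap_kernel(), va, pa | PMAP_DEVICE,
 *	    PROT_READ | PROT_WRITE, 0);
 *
 * (illustrative call; the pmap strips the flags with PMAP_PA_MASK)
 */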

/*
 * Physical / virtual address structure. In a number of places (particularly
 * during bootstrapping) we need to keep track of the physical and virtual
 * addresses of various pages.
 */
typedef struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	paddr_t pv_pa;
	vaddr_t pv_va;
} pv_addr_t;

/*
 * Determine various modes for PTEs (user vs. kernel, cacheable
 * vs. non-cacheable).
 */
#define	PTE_KERNEL	0
#define	PTE_USER	1
#define	PTE_NOCACHE	0
#define	PTE_CACHE	1
#define	PTE_PAGETABLE	2

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED and PVF_WRITE are kept in individual pv_entry
 * structures for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */

/*
 * Commonly referenced structures
 */
extern struct pmap	kernel_pmap_store;

/*
 * Macros that we need to export
 */
#define pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_is_modified(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
#define	pmap_is_referenced(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_REF) != 0)

#define	pmap_deactivate(p)		do { /* nothing */ } while (0)

#define pmap_init_percpu()		do { /* nothing */ } while (0)
#define pmap_unuse_final(p)		do { /* nothing */ } while (0)
#define	pmap_remove_holes(vm)		do { /* nothing */ } while (0)

#define PMAP_CHECK_COPYIN	1

#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/* Functions we use internally. */
void	pmap_bootstrap(pd_entry_t *, vaddr_t, vaddr_t);

int	pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
int	pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);

void	pmap_postinit(void);

void	vector_page_setprot(int);

/* XXX */
void	pmap_kenter_cache(vaddr_t va, paddr_t pa, vm_prot_t prot, int cacheable);

/* Bootstrapping routines. */
void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
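
/*
 * For example, early bootstrap code typically maps a physically
 * contiguous chunk of the kernel with the largest page sizes that fit,
 * along the lines of (illustrative call, argument values are placeholders):
 *
 *	pmap_map_chunk(l1pt_va, va, pa, size,
 *	    PROT_READ | PROT_WRITE, PTE_CACHE);
 */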

/*
 * The current top of kernel VM
 */
extern vaddr_t	pmap_curmaxkvaddr;

/*
 * Useful macros and constants
 */

/* Virtual address to page table entry */
static __inline pt_entry_t *
vtopte(vaddr_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == FALSE)
		return (NULL);
	return (ptep);
}

/*
 * Page tables are always mapped write-through.
 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs.
 */
extern int pmap_needs_pte_sync;

#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync

#define	PTE_SYNC(pte)							\
do {									\
	cpu_drain_writebuf();						\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		paddr_t pa;						\
		cpu_dcache_wb_range((vaddr_t)(pte), sizeof(pt_entry_t));\
		if (cpu_sdcache_enabled()) {				\
		(void)pmap_extract(pmap_kernel(), (vaddr_t)(pte), &pa);	\
		cpu_sdcache_wb_range((vaddr_t)(pte), (paddr_t)(pa),	\
		    sizeof(pt_entry_t));				\
		};							\
		cpu_drain_writebuf();					\
	}								\
} while (/*CONSTCOND*/0)

#define	PTE_SYNC_RANGE(pte, cnt)					\
do {									\
	cpu_drain_writebuf();						\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		paddr_t pa;						\
		cpu_dcache_wb_range((vaddr_t)(pte),			\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
		if (cpu_sdcache_enabled()) {				\
		(void)pmap_extract(pmap_kernel(), (vaddr_t)(pte), &pa);\
		cpu_sdcache_wb_range((vaddr_t)(pte), (paddr_t)(pa),	\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
		};							\
		cpu_drain_writebuf();					\
	}								\
} while (/*CONSTCOND*/0)
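
/*
 * For example, the pmap always pairs a PTE store with a sync:
 *
 *	*ptep = npte;
 *	PTE_SYNC(ptep);
 *
 * and uses PTE_SYNC_RANGE(ptep, cnt) after filling a run of cnt entries.
 */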

#define	l1pte_valid(pde)	(((pde) & L1_TYPE_MASK) != L1_TYPE_INV)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)

#define l2pte_index(v)		(((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
#define	l2pte_valid(pte)	(((pte) & L2_TYPE_MASK) != L2_TYPE_INV)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)

/* L1 and L2 page table macros */
#define pmap_pde_v(pde)		l1pte_valid(*(pde))
#define pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))
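
/*
 * Illustrative sketch only (the helper name below is hypothetical, not
 * part of the pmap API): vtopte() and the l2pte_* macros above combine to
 * turn a kernel virtual address into a physical address for 4KB page
 * mappings; section and large-page mappings are ignored for simplicity.
 */
static __inline int
pmap_kva_to_pa_sketch(vaddr_t va, paddr_t *pap)
{
	pt_entry_t *ptep = vtopte(va);

	if (ptep == NULL || !l2pte_valid(*ptep))
		return (FALSE);		/* no valid small-page mapping */
	*pap = l2pte_pa(*ptep) | (va & PAGE_MASK);
	return (TRUE);
}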

/************************* ARM MMU configuration *****************************/

void	pmap_pte_init_armv7(void);

#endif /* !_LOCORE */

/*****************************************************************************/

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (0-14) */
#define	PMAP_DOMAIN_KERNEL	15	/* The kernel uses domain #15 */

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define	L1_S_PROT_UR_v7		(L1_S_V7_AP(AP_V7_KRUR))
#define	L1_S_PROT_UW_v7		(L1_S_V7_AP(AP_KRWURW))
#define	L1_S_PROT_KR_v7		(L1_S_V7_AP(AP_V7_KR))
#define	L1_S_PROT_KW_v7		(L1_S_V7_AP(AP_KRW))
#define	L1_S_PROT_MASK_v7	(L1_S_V7_AP(0x07))

#define	L1_S_CACHE_MASK_v7	(L1_S_B|L1_S_C|L1_S_V7_TEX_MASK)

#define	L1_S_COHERENT_v7	(L1_S_C)

#define	L2_L_PROT_UR_v7		(L2_V7_AP(AP_V7_KRUR))
#define	L2_L_PROT_UW_v7		(L2_V7_AP(AP_KRWURW))
#define	L2_L_PROT_KR_v7		(L2_V7_AP(AP_V7_KR))
#define	L2_L_PROT_KW_v7		(L2_V7_AP(AP_KRW))
#define	L2_L_PROT_MASK_v7	(L2_V7_AP(0x07) | L2_V7_L_XN)

#define	L2_L_CACHE_MASK_v7	(L2_B|L2_C|L2_V7_L_TEX_MASK)

#define	L2_L_COHERENT_v7	(L2_C)

#define	L2_S_PROT_UR_v7		(L2_V7_AP(AP_V7_KRUR))
#define	L2_S_PROT_UW_v7		(L2_V7_AP(AP_KRWURW))
#define	L2_S_PROT_KR_v7		(L2_V7_AP(AP_V7_KR))
#define	L2_S_PROT_KW_v7		(L2_V7_AP(AP_KRW))
#define	L2_S_PROT_MASK_v7	(L2_V7_AP(0x07) | L2_V7_S_XN)

#define	L2_S_CACHE_MASK_v7	(L2_B|L2_C|L2_V7_S_TEX_MASK)

#define	L2_S_COHERENT_v7	(L2_C)

#define	L1_S_PROTO_v7		(L1_TYPE_S)

#define	L1_C_PROTO_v7		(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_v7		(L2_TYPE_S)

#define	L1_S_PROT_UR		L1_S_PROT_UR_v7
#define	L1_S_PROT_UW		L1_S_PROT_UW_v7
#define	L1_S_PROT_KR		L1_S_PROT_KR_v7
#define	L1_S_PROT_KW		L1_S_PROT_KW_v7
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_v7

#define	L2_L_PROT_UR		L2_L_PROT_UR_v7
#define	L2_L_PROT_UW		L2_L_PROT_UW_v7
#define	L2_L_PROT_KR		L2_L_PROT_KR_v7
#define	L2_L_PROT_KW		L2_L_PROT_KW_v7
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_v7

#define	L2_S_PROT_UR		L2_S_PROT_UR_v7
#define	L2_S_PROT_UW		L2_S_PROT_UW_v7
#define	L2_S_PROT_KR		L2_S_PROT_KR_v7
#define	L2_S_PROT_KW		L2_S_PROT_KW_v7
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_v7

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_v7
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_v7
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_v7

#define	L1_S_COHERENT		L1_S_COHERENT_v7
#define	L2_L_COHERENT		L2_L_COHERENT_v7
#define	L2_S_COHERENT		L2_S_COHERENT_v7

#define	L1_S_PROTO		L1_S_PROTO_v7
#define	L1_C_PROTO		L1_C_PROTO_v7
#define	L2_S_PROTO		L2_S_PROTO_v7

/*
 * These inline functions return various bits based on kernel/user and
 * protection.  Note that the compiler will usually fold these at compile
 * time.
 */
#ifndef _LOCORE
static __inline pt_entry_t
L1_S_PROT(int ku, vm_prot_t pr)
{
	pt_entry_t pte;

	if (ku == PTE_USER)
		pte = (pr & PROT_WRITE) ? L1_S_PROT_UW : L1_S_PROT_UR;
	else
		pte = (pr & PROT_WRITE) ? L1_S_PROT_KW : L1_S_PROT_KR;

	if ((pr & PROT_EXEC) == 0)
		pte |= L1_S_V7_XN;

	return pte;
}

static __inline pt_entry_t
L2_L_PROT(int ku, vm_prot_t pr)
{
	pt_entry_t pte;

	if (ku == PTE_USER)
		pte = (pr & PROT_WRITE) ? L2_L_PROT_UW : L2_L_PROT_UR;
	else
		pte = (pr & PROT_WRITE) ? L2_L_PROT_KW : L2_L_PROT_KR;

	if ((pr & PROT_EXEC) == 0)
		pte |= L2_V7_L_XN;

	return pte;
}

static __inline pt_entry_t
L2_S_PROT(int ku, vm_prot_t pr)
{
	pt_entry_t pte;

	if (ku == PTE_USER)
		pte = (pr & PROT_WRITE) ? L2_S_PROT_UW : L2_S_PROT_UR;
	else
		pte = (pr & PROT_WRITE) ? L2_S_PROT_KW : L2_S_PROT_KR;

	if ((pr & PROT_EXEC) == 0)
		pte |= L2_V7_S_XN;

	return pte;
}

static __inline int
l2pte_is_writeable(pt_entry_t pte, struct pmap *pm)
{
	return (pte & L2_V7_AP(0x4)) == 0;
}
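
/*
 * Illustrative sketch only (the helper name below is hypothetical, not
 * part of the pmap API): a small-page (4KB) PTE is composed from the
 * prototype bits, the frame address and the protection bits; the cache
 * mode bits (within L2_S_CACHE_MASK) are chosen by the pmap at run time
 * and omitted here.
 */
static __inline pt_entry_t
pmap_mk_l2s_pte_sketch(paddr_t pa, int ku, vm_prot_t pr)
{
	return (L2_S_PROTO | (pa & L2_S_FRAME) | L2_S_PROT(ku, pr));
}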
#endif

/*
 * Macros to test if a mapping is mappable with an L1 Section mapping
 * or an L2 Large Page mapping.
 */
#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
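
/*
 * For example, pmap_map_chunk() can use these to pick the largest mapping
 * that fits: a 1MB section when L1_S_MAPPABLE_P() holds, else a 64KB large
 * page when L2_L_MAPPABLE_P() holds, else 4KB small pages.
 */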

#endif /* _KERNEL */

#ifndef _LOCORE
/*
 * pmap-specific data store in the vm_page structure.
 */
struct vm_page_md {
	struct pv_entry *pvh_list;		/* pv_entry list */
	int pvh_attrs;				/* page attributes */
};

#define	VM_MDPAGE_INIT(pg)						\
do {									\
	(pg)->mdpage.pvh_list = NULL;					\
	(pg)->mdpage.pvh_attrs = 0;					\
} while (/*CONSTCOND*/0)
#endif /* _LOCORE */

#endif	/* _ARM_PMAP_H_ */