/* $OpenBSD: pmap.h,v 1.25 2023/12/11 22:12:53 kettenis Exp $ */
/*
 * Copyright (c) 2008,2009,2014 Dale Rahn <drahn@dalerahn.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef	_ARM64_PMAP_H_
#define	_ARM64_PMAP_H_

#ifndef _LOCORE
#include <sys/mutex.h>
#include <sys/queue.h>
#include <machine/pte.h>
#endif


/* V->P mapping data */
#define VP_IDX0_CNT	512
#define VP_IDX0_MASK	(VP_IDX0_CNT-1)
#define VP_IDX0_POS	39
#define VP_IDX1_CNT	512
#define VP_IDX1_MASK	(VP_IDX1_CNT-1)
#define VP_IDX1_POS	30
#define VP_IDX2_CNT	512
#define VP_IDX2_MASK	(VP_IDX2_CNT-1)
#define VP_IDX2_POS	21
#define VP_IDX3_CNT	512
#define VP_IDX3_MASK	(VP_IDX3_CNT-1)
#define VP_IDX3_POS	12

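/*
 * Illustrative sketch only: how a virtual address would be split into
 * per-level table indices using the constants above.  The VP_IDXn()
 * helpers shown here are hypothetical (the real lookup code lives in
 * pmap.c), but the shift/mask arithmetic follows directly from the
 * definitions.
 *
 *	#define VP_IDX0(va)	(((va) >> VP_IDX0_POS) & VP_IDX0_MASK)
 *	#define VP_IDX1(va)	(((va) >> VP_IDX1_POS) & VP_IDX1_MASK)
 *	#define VP_IDX2(va)	(((va) >> VP_IDX2_POS) & VP_IDX2_MASK)
 *	#define VP_IDX3(va)	(((va) >> VP_IDX3_POS) & VP_IDX3_MASK)
 *
 * With 512 entries per level (9 bits each) and VP_IDX3_POS at the 4KB
 * page boundary, the four levels together index bits [47:12] of the VA.
 */
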
/* cache flags */
#define PMAP_CACHE_CI		(PMAP_MD0)		/* cache inhibit */
#define PMAP_CACHE_WT		(PMAP_MD1)		/* writethru */
#define PMAP_CACHE_WB		(PMAP_MD1|PMAP_MD0)	/* writeback */
#define PMAP_CACHE_DEV_NGNRNE	(PMAP_MD2)		/* device nGnRnE */
#define PMAP_CACHE_DEV_NGNRE	(PMAP_MD2|PMAP_MD0)	/* device nGnRE */
#define PMAP_CACHE_BITS		(PMAP_MD0|PMAP_MD1|PMAP_MD2)

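/*
 * Hedged usage sketch: the PMAP_CACHE_* values select the memory
 * attributes of a mapping, e.g. when passed as the "cacheable" argument
 * of pmap_kenter_cache() (declared below).  Addresses are placeholders;
 * normal RAM mappings are assumed to default to PMAP_CACHE_WB.
 *
 *	pmap_kenter_cache(va, pa, PROT_READ | PROT_WRITE,
 *	    PMAP_CACHE_DEV_NGNRE);
 */
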
#define PTED_VA_MANAGED_M	(PMAP_MD3)
#define PTED_VA_WIRED_M		(PMAP_MD3 << 1)
#define PTED_VA_EXEC_M		(PMAP_MD3 << 2)


#if defined(_KERNEL) && !defined(_LOCORE)
/*
 * Pmap stuff
 */

typedef struct pmap *pmap_t;

struct pmap {
	struct mutex pm_mtx;
	union {
		struct pmapvp0 *l0;	/* virtual to physical table 4 lvl */
		struct pmapvp1 *l1;	/* virtual to physical table 3 lvl */
	} pm_vp;
	uint64_t pm_pt0pa;
	uint64_t pm_asid;
	uint64_t pm_guarded;
	int have_4_level_pt;
	int pm_privileged;
	int pm_refs;				/* ref count */
	struct pmap_statistics  pm_stats;	/* pmap statistics */
	uint64_t pm_apiakey[2];
	uint64_t pm_apdakey[2];
	uint64_t pm_apibkey[2];
	uint64_t pm_apdbkey[2];
	uint64_t pm_apgakey[2];
};

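/*
 * Sketch (assumed layout, not a definitive walker): when have_4_level_pt
 * is nonzero the pmap is rooted at a level-0 table and pm_vp.l0 is the
 * valid union member; otherwise translation starts at level 1 via
 * pm_vp.l1.  The vp[] array and VP_IDX0() below are hypothetical names
 * mirroring the lookup code in pmap.c.
 *
 *	if (pm->have_4_level_pt)
 *		vp1 = pm->pm_vp.l0->vp[VP_IDX0(va)];
 *	else
 *		vp1 = pm->pm_vp.l1;
 */
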
#define PMAP_PA_MASK	~((paddr_t)PAGE_MASK) /* to remove the flags */
#define PMAP_NOCACHE	0x1 /* non-cacheable memory */
#define PMAP_DEVICE	0x2 /* device memory */
#define PMAP_WC		PMAP_DEVICE

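/*
 * Hedged example: PMAP_NOCACHE/PMAP_DEVICE are flag bits carried in the
 * low bits of the physical address handed to pmap_enter(9) or
 * pmap_kenter_pa(9), and PMAP_PA_MASK strips them off again.  The
 * values below are placeholders.
 *
 *	pmap_kenter_pa(va, pa | PMAP_NOCACHE, PROT_READ | PROT_WRITE);
 *	paddr_t real_pa = pa & PMAP_PA_MASK;
 */
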
#define PG_PMAP_MOD		PG_PMAP0
#define PG_PMAP_REF		PG_PMAP1
#define PG_PMAP_EXE		PG_PMAP2

/* per-CPU pages ([NCPUS]) */
extern paddr_t zero_page;
extern paddr_t copy_src_page;
extern paddr_t copy_dst_page;

void pagezero_cache(vaddr_t);

extern struct pmap kernel_pmap_;
#define pmap_kernel()			(&kernel_pmap_)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

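/*
 * Minimal sketch: pmap_kernel() and the counter accessors above are
 * plain macros, so the kernel pmap's statistics can be read directly
 * (field types as defined by struct pmap_statistics).
 *
 *	struct pmap *pm = pmap_kernel();
 *	long resident = pmap_resident_count(pm);
 *	long wired = pmap_wired_count(pm);
 */
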
vaddr_t pmap_bootstrap(long kvo, paddr_t lpt1, long kernelstart,
    long kernelend, long ram_start, long ram_end);
void pmap_postinit(void);
void pmap_init_percpu(void);

void pmap_kenter_cache(vaddr_t va, paddr_t pa, vm_prot_t prot, int cacheable);
void pmap_page_ro(pmap_t pm, vaddr_t va, vm_prot_t prot);
void pmap_page_rw(pmap_t pm, vaddr_t va);

void pmap_setpauthkeys(struct pmap *);

paddr_t pmap_steal_avail(size_t size, int align, void **kva);
void pmap_avail_fixup(void);
void pmap_physload_avail(void);

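/*
 * Hedged sketch of early-boot use: pmap_steal_avail() hands out physical
 * memory before uvm is initialized.  Size and alignment here are
 * placeholders, and the kva out-parameter is assumed to receive a
 * directly usable virtual address for the stolen range.
 *
 *	void *va;
 *	paddr_t pa = pmap_steal_avail(PAGE_SIZE, PAGE_SIZE, &va);
 */
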
#define PMAP_GROWKERNEL

struct pv_entry;

/* investigate */
#define pmap_unuse_final(p)		do { /* nothing */ } while (0)
int	pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t);

#define __HAVE_PMAP_MPSAFE_ENTER_COW

#endif /* _KERNEL && !_LOCORE */

#ifndef _LOCORE
struct vm_page_md {
	struct mutex pv_mtx;
	LIST_HEAD(,pte_desc) pv_list;
};

#define VM_MDPAGE_INIT(pg) do {			\
	mtx_init(&(pg)->mdpage.pv_mtx, IPL_VM);	\
	LIST_INIT(&((pg)->mdpage.pv_list));	\
} while (0)
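/*
 * Usage sketch: VM_MDPAGE_INIT() is expected to be invoked by the
 * machine-independent VM code when a vm_page is set up, initializing the
 * per-page pv list head and its mutex at IPL_VM.
 *
 *	struct vm_page *pg = ...;
 *	VM_MDPAGE_INIT(pg);
 */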
#endif	/* !_LOCORE */

#endif	/* _ARM64_PMAP_H_ */