1 /* $OpenBSD: pmap.h,v 1.54 2023/12/11 22:12:53 kettenis Exp $ */
2
3 /*
4 * Copyright (c) 1987 Carnegie-Mellon University
5 * Copyright (c) 1992, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * Ralph Campbell.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * from: @(#)pmap.h 8.1 (Berkeley) 6/10/93
36 */
37
38 #ifndef _MIPS64_PMAP_H_
39 #define _MIPS64_PMAP_H_
40
41 #include <sys/mutex.h>
42
43 #ifdef _KERNEL
44
45 #include <machine/pte.h>
46
47 /*
48 * The user address space is currently limited to 1TB (0x0 - 0x10000000000).
49 *
50 * The user address space is mapped using a two level structure where
51 * the virtual addresses bits are split in three groups:
52 * segment:directory:page:offset
53 * where:
54 * - offset are the in-page offsets (PAGE_SHIFT bits)
55 * - page are the third level page table index
56 * (PMAP_PGSHIFT - Log2(pt_entry_t) bits)
57 * - directory are the second level page table (directory) index
58 * (PMAP_PGSHIFT - Log2(void *) bits)
59 * - segment are the first level page table (segment) index
60 * (PMAP_PGSHIFT - Log2(void *) bits)
61 *
 * This scheme allows segment, directory and page tables to have the same
 * size (1 << PMAP_PGSHIFT bytes, regardless of the pt_entry_t size) so that
 * they can share the same allocator.
65 *
66 * Note: The kernel doesn't use the same data structures as user programs.
67 * All the PTE entries are stored in a single array in Sysmap which is
68 * dynamically allocated at boot time.
69 */
70
71 #if defined(MIPS_PTE64) && PAGE_SHIFT == 12
72 #error "Cannot use MIPS_PTE64 with 4KB pages."
73 #endif
74
75 /*
76 * Size of page table structs (page tables, page directories,
77 * and segment table) used by this pmap.
78 */
79
/* all three levels of the tree use PMAP_PGSIZE-byte tables (see above) */
#define PMAP_PGSHIFT 12
#define PMAP_PGSIZE (1UL << PMAP_PGSHIFT)

/* number of entries in a page directory (array of pointers) */
#define NPDEPG (PMAP_PGSIZE / sizeof(void *))
/* number of entries in a page table (array of pt_entry_t) */
#define NPTEPG (PMAP_PGSIZE / sizeof(pt_entry_t))
85
86 /*
87 * Segment sizes
88 */
89
/*
 * Bit positions in a virtual address where the first level (segment)
 * and second level (directory) indices start.  PTE_LOG is
 * Log2(sizeof(pt_entry_t)); the `3' is Log2(sizeof(void *)).
 */
#define SEGSHIFT (PAGE_SHIFT+PMAP_PGSHIFT*2-PTE_LOG-3)
#define DIRSHIFT (PAGE_SHIFT+PMAP_PGSHIFT-PTE_LOG)
/* bytes of address space covered by one segment / one directory entry */
#define NBSEG (1UL << SEGSHIFT)
#define NBDIR (1UL << DIRSHIFT)
#define SEGOFSET (NBSEG - 1)
#define DIROFSET (NBDIR - 1)

/* round a virtual address down (trunc) or up (round) to a boundary */
#define mips_trunc_seg(x) ((vaddr_t)(x) & ~SEGOFSET)
#define mips_trunc_dir(x) ((vaddr_t)(x) & ~DIROFSET)
#define mips_round_seg(x) (((vaddr_t)(x) + SEGOFSET) & ~SEGOFSET)
#define mips_round_dir(x) (((vaddr_t)(x) + DIROFSET) & ~DIROFSET)
/* first level lookup: directory pointer for `v' in pmap `m' (may be NULL) */
#define pmap_segmap(m, v) ((m)->pm_segtab->seg_tab[((v) >> SEGSHIFT)])
102
/* number of segment table entries */
#define PMAP_SEGTABSIZE (PMAP_PGSIZE / sizeof(void *))

/* first level of the user page table tree: array of directory pointers */
struct segtab {
	pt_entry_t **seg_tab[PMAP_SEGTABSIZE];
};
109
/*
 * Per-pmap ASID state; the pmap ends with an array of these
 * (see pm_asid[] and PMAP_SIZEOF() below).
 */
struct pmap_asid_info {
	u_int pma_asid; /* address space tag */
	u_int pma_asidgen; /* TLB PID generation number */
};
114
/*
 * Machine dependent pmap structure.
 */
typedef struct pmap {
	struct mutex pm_mtx; /* pmap lock */
	struct mutex pm_swmtx; /* pmap switch lock */
	int pm_count; /* pmap reference count */
	struct pmap_statistics pm_stats; /* pmap statistics */
	struct segtab *pm_segtab; /* pointers to pages of PTEs */
	/* variable-size trailing array; real size set via PMAP_SIZEOF() */
	struct pmap_asid_info pm_asid[1]; /* ASID information */
} *pmap_t;
126
/*
 * Compute the sizeof of a pmap structure. Subtract one because one
 * ASID info structure is already included in the pmap structure itself.
 * `x' is the number of pmap_asid_info entries wanted.
 */
#define PMAP_SIZEOF(x) \
	(ALIGN(sizeof(struct pmap) + \
	(sizeof(struct pmap_asid_info) * ((x) - 1))))
134
/* machine-dependent pg_flags */
#define PGF_UNCACHED PG_PMAP0 /* Page is explicitly uncached */
#define PGF_CACHED PG_PMAP1 /* Page is currently cached */
#define PGF_ATTR_MOD PG_PMAP2 /* modified attribute (cf. pmap_emulate_modify) */
#define PGF_ATTR_REF PG_PMAP3 /* referenced attribute */
/* attribute bits to keep when the other PGF_* bits are rewritten */
#define PGF_PRESERVE (PGF_ATTR_MOD | PGF_ATTR_REF)

/* MD pmap_enter() flag: map the page uncached */
#define PMAP_NOCACHE PMAP_MD0

extern struct pmap *const kernel_pmap_ptr;

#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
#define pmap_kernel() (kernel_pmap_ptr)

/*
 * NOTE(review): pg_ri is presumably the PTE read-inhibit bit; copyin
 * checking appears to be enabled only when it is unavailable (zero) —
 * confirm against pmap.c / pte.h.
 */
extern pt_entry_t pg_ri;
#define PMAP_CHECK_COPYIN (pg_ri == 0)

#define PMAP_STEAL_MEMORY /* Enable 'stealing' during boot */
154
/*
 * This pmap expresses address preferences to uvm (NOTE(review):
 * presumably to avoid virtual cache aliases — confirm).
 */
#define PMAP_PREFER
extern vaddr_t pmap_prefer_mask;
/* pmap prefer alignment */
#define PMAP_PREFER_ALIGN() \
	(pmap_prefer_mask ? pmap_prefer_mask + 1 : 0)
/* pmap prefer offset in alignment */
#define PMAP_PREFER_OFFSET(of) ((of) & pmap_prefer_mask)
162
/* MD pmap entry points (implementations live in the pmap .c file) */
void pmap_bootstrap(void);
int pmap_copyinsn(pmap_t, vaddr_t, uint32_t *);
int pmap_emulate_modify(pmap_t, vaddr_t);
void pmap_page_cache(vm_page_t, u_int);

/* MI hooks this pmap does not need: expand to nothing */
#define pmap_init_percpu() do { /* nothing */ } while (0)
#define pmap_unuse_final(p) do { /* nothing yet */ } while (0)
#define pmap_remove_holes(vm) do { /* nothing */ } while (0)

/* physical pages can be mapped/unmapped through a direct mapping */
#define __HAVE_PMAP_DIRECT
vaddr_t pmap_map_direct(vm_page_t);
vm_page_t pmap_unmap_direct(vaddr_t);

#define __HAVE_PMAP_COLLECT
177
178 /*
179 * MD flags to pmap_enter:
180 */
181
/* mask selecting the page frame bits of a physical address */
#define PMAP_PA_MASK ~((paddr_t)PAGE_MASK)

/* Kernel virtual address to page table entry */
#define kvtopte(va) \
	(Sysmap + (((vaddr_t)(va) - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT))
/* User virtual address to pte page entry */
#define uvtopte(va) (((va) >> PAGE_SHIFT) & (NPTEPG -1))
/* User virtual address to page directory entry */
#define uvtopde(va) (((va) >> DIRSHIFT) & (NPDEPG - 1))
190
191 static inline pt_entry_t *
pmap_pte_lookup(struct pmap * pmap,vaddr_t va)192 pmap_pte_lookup(struct pmap *pmap, vaddr_t va)
193 {
194 pt_entry_t **pde, *pte;
195
196 if ((pde = pmap_segmap(pmap, va)) == NULL)
197 return NULL;
198 if ((pte = pde[uvtopde(va)]) == NULL)
199 return NULL;
200 return pte + uvtopte(va);
201 }
202
203 extern pt_entry_t *Sysmap; /* kernel pte table */
204 extern u_int Sysmapsize; /* number of pte's in Sysmap */
205
206 #endif /* _KERNEL */
207
208 #if !defined(_LOCORE)
/*
 * Physical-to-virtual (pv) entry: one per mapping of a physical page,
 * linked off the page's vm_page_md below.
 */
typedef struct pv_entry {
	struct pv_entry *pv_next; /* next pv_entry */
	struct pmap *pv_pmap; /* pmap where mapping lies */
	vaddr_t pv_va; /* virtual address for mapping */
} *pv_entry_t;
214
/* MD per-page data, embedded in struct vm_page as `mdpage' */
struct vm_page_md {
	struct mutex pv_mtx; /* pv list lock */
	struct pv_entry pv_ent; /* pv list of this seg */
};
219
/* initialize the MD part of a vm_page: empty pv list, unlocked pv mutex */
#define VM_MDPAGE_INIT(pg) \
do { \
	mtx_init(&(pg)->mdpage.pv_mtx, IPL_VM); \
	(pg)->mdpage.pv_ent.pv_next = NULL; \
	(pg)->mdpage.pv_ent.pv_pmap = NULL; \
	(pg)->mdpage.pv_ent.pv_va = 0; \
} while (0)
227
228 #endif /* !_LOCORE */
229
230 #endif /* !_MIPS64_PMAP_H_ */
231