/* $NetBSD: pmap.h,v 1.56 2015/04/03 01:04:23 riastradh Exp $ */

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef _X86_PMAP_H_
#define _X86_PMAP_H_

/*
 * pl*_pi: index in the ptp page for a pde mapping a VA.
 * (pl*_i below is the index in the virtual array of all pdes per level)
 */
#define pl1_pi(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)
#define pl2_pi(VA)	(((VA_SIGN_POS(VA)) & L2_MASK) >> L2_SHIFT)
#define pl3_pi(VA)	(((VA_SIGN_POS(VA)) & L3_MASK) >> L3_SHIFT)
#define pl4_pi(VA)	(((VA_SIGN_POS(VA)) & L4_MASK) >> L4_SHIFT)

/*
 * pl*_i: generate index into pde/pte arrays in virtual space
 *
 * pl_i(va, X) == plX_i(va) <= pl_i_roundup(va, X)
 */
#define pl1_i(VA)	(((VA_SIGN_POS(VA)) & L1_FRAME) >> L1_SHIFT)
#define pl2_i(VA)	(((VA_SIGN_POS(VA)) & L2_FRAME) >> L2_SHIFT)
#define pl3_i(VA)	(((VA_SIGN_POS(VA)) & L3_FRAME) >> L3_SHIFT)
#define pl4_i(VA)	(((VA_SIGN_POS(VA)) & L4_FRAME) >> L4_SHIFT)
#define pl_i(va, lvl) \
	(((VA_SIGN_POS(va)) & ptp_masks[(lvl)-1]) >> ptp_shifts[(lvl)-1])

#define pl_i_roundup(va, lvl)	pl_i((va)+ ~ptp_masks[(lvl)-1], (lvl))
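
/*
 * Worked example (illustrative only, assuming the usual amd64 constants:
 * PGSHIFT == 12 and 512 entries per level, so L1_SHIFT == 12 and
 * L2_SHIFT == 21).  For the user VA 0x201000:
 *
 *	pl1_pi(0x201000) == 1	slot 1 within its L1 PTP
 *	pl2_i(0x201000)  == 1	va lies in the second 2MB region
 *	pl1_i(0x201000)  == 513	== pl2_i(va) * 512 + pl1_pi(va)
 *
 * and pl_i(0x201000, 1) == pl1_i(0x201000), as promised above.
 */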

/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 *   a PTP's VA is the first VA mapped by that PTP
 */

#define ptp_va2o(va, lvl)	(pl_i(va, (lvl)+1) * PAGE_SIZE)
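
/*
 * Continuing the example above (illustrative only): on amd64,
 * ptp_va2o(0x201000, 1) == pl_i(0x201000, 2) * PAGE_SIZE == 0x1000,
 * i.e. the L1 PTP covering that VA sits one page into the PTE space.
 */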

/* size of a PDP: usually one page, except for PAE */
#ifdef PAE
#define PDP_SIZE 4
#else
#define PDP_SIZE 1
#endif


#if defined(_KERNEL)
#include <sys/kcpuset.h>

/*
 * pmap data structures: see pmap.c for details of locking.
 */

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * linked list of all non-kernel pmaps
 */
extern struct pmap_head pmaps;
extern kmutex_t pmaps_lock;    /* protects pmaps */

/*
 * pool_cache(9) that PDPs are allocated from
 */
extern struct pool_cache pmap_pdp_cache;

/*
 * the pmap structure
 *
 * note that the pm_obj contains the lock pointer, the reference count,
 * page list, and number of PTPs within the pmap.
 *
 * pm_lock is the same as the lock for vm object 0.  Changes to
 * the other objects may only be made if that lock has been taken
 * (the other object locks are only used when uvm_pagealloc is called)
 */

struct pmap {
	struct uvm_object pm_obj[PTP_LEVELS-1];	/* objects for lvl >= 1 */
#define pm_lock pm_obj[0].vmobjlock
	kmutex_t pm_obj_lock[PTP_LEVELS-1];	/* locks for pm_objs */
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
	paddr_t pm_pdirpa[PDP_SIZE];	/* PA of PDs (read-only after create) */
	struct vm_page *pm_ptphint[PTP_LEVELS-1];
					/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats;  /* pmap stats (lck by object lock) */

#if !defined(__x86_64__)
	vaddr_t pm_hiexec;		/* highest executable mapping */
#endif /* !defined(__x86_64__) */
	int pm_flags;			/* see below */

	union descriptor *pm_ldt;	/* user-set LDT */
	size_t pm_ldt_len;		/* size of LDT in bytes */
	int pm_ldt_sel;			/* LDT selector */
	kcpuset_t *pm_cpus;		/* mask of CPUs using pmap */
	kcpuset_t *pm_kernel_cpus;	/* mask of CPUs using kernel part
					   of pmap */
	kcpuset_t *pm_xen_ptp_cpus;	/* mask of CPUs which have this pmap's
					   ptp mapped */
	uint64_t pm_ncsw;		/* for assertions */
	struct vm_page *pm_gc_ptp;	/* pages from pmap g/c */
};
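
/*
 * Locking sketch (illustrative, not part of the API): pm_lock aliases
 * vm object 0's lock, and per the comment above it must be held before
 * any of the other per-level object locks may be taken:
 *
 *	mutex_enter(pmap->pm_lock);
 *	... examine or modify PTPs, statistics, ...
 *	mutex_exit(pmap->pm_lock);
 */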

/* macro to access pm_pdirpa slots */
#ifdef PAE
#define pmap_pdirpa(pmap, index) \
	((pmap)->pm_pdirpa[l2tol3(index)] + l2tol2(index) * sizeof(pd_entry_t))
#else
#define pmap_pdirpa(pmap, index) \
	((pmap)->pm_pdirpa[0] + (index) * sizeof(pd_entry_t))
#endif
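
/*
 * Example (illustrative only, assuming the usual PAE constants where the
 * PDP is split over PDP_SIZE (4) pages of 512 PDEs each): for index 515,
 *
 *	l2tol3(515) == 1	second of the four PD pages
 *	l2tol2(515) == 3	slot 3 within that page
 *
 * so pmap_pdirpa(pmap, 515) == pm_pdirpa[1] + 3 * sizeof(pd_entry_t).
 */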

/*
 * flag to be used for kernel mappings: PG_u on Xen/amd64,
 * 0 otherwise.
 */
#if defined(XEN) && defined(__x86_64__)
#define PG_k PG_u
#else
#define PG_k 0
#endif

/*
 * MD flags that we use for pmap_enter and pmap_kenter_pa:
 */

/*
 * global kernel variables
 */

/*
 * PDPpaddr is the physical address of the kernel's PDP.
 * - i386 non-PAE and amd64: PDPpaddr corresponds directly to the %cr3
 *   value associated with the kernel process, proc0.
 * - i386 PAE: it still represents the PA of the kernel's PDP (L2).  Due to
 *   the L3 PD, it can no longer be considered the equivalent of %cr3.
 * - Xen: it corresponds to the PFN of the kernel's PDP.
 */
extern u_long PDPpaddr;
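
/*
 * Sketch (illustrative, not part of the API): on native i386 non-PAE and
 * amd64 the kernel page directory could be activated directly with
 *
 *	lcr3(PDPpaddr);
 *
 * whereas under Xen PDPpaddr is a PFN and would first have to be
 * translated to a machine address.
 */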

extern int pmap_pg_g;			/* do we support PG_G? */
extern long nkptp[PTP_LEVELS];

/*
 * macros
 */

#define pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define pmap_clear_modify(pg)		pmap_clear_attrs(pg, PG_M)
#define pmap_clear_reference(pg)	pmap_clear_attrs(pg, PG_U)
#define pmap_copy(DP,SP,D,L,S)		__USE(L)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#define pmap_move(DP,SP,D,L,S)
#define pmap_phys_address(ppn)		(x86_ptob(ppn) & ~X86_MMAP_FLAG_MASK)
#define pmap_mmap_flags(ppn)		x86_mmap_flags(ppn)
#define pmap_valid_entry(E)		((E) & PG_V) /* is PDE or PTE valid? */

#if defined(__x86_64__) || defined(PAE)
#define X86_MMAP_FLAG_SHIFT	(64 - PGSHIFT)
#else
#define X86_MMAP_FLAG_SHIFT	(32 - PGSHIFT)
#endif

#define X86_MMAP_FLAG_MASK	0xf
#define X86_MMAP_FLAG_PREFETCH	0x1
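
/*
 * Hedged sketch (an assumption about the intended encoding, not a
 * definitive reference): a driver's d_mmap entry point can carry one of
 * the MD flags above in the uppermost bits of the page number it returns,
 * e.g.
 *
 *	return x86_btop(pa) |
 *	    ((paddr_t)X86_MMAP_FLAG_PREFETCH << X86_MMAP_FLAG_SHIFT);
 *
 * pmap_mmap_flags() then recovers the flags from that cookie, and
 * pmap_phys_address() masks them back out of the physical address.
 */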

/*
 * prototypes
 */

void		pmap_activate(struct lwp *);
void		pmap_bootstrap(vaddr_t);
bool		pmap_clear_attrs(struct vm_page *, unsigned);
bool		pmap_pv_clear_attrs(paddr_t, unsigned);
void		pmap_deactivate(struct lwp *);
void		pmap_page_remove(struct vm_page *);
void		pmap_pv_remove(paddr_t);
void		pmap_remove(struct pmap *, vaddr_t, vaddr_t);
bool		pmap_test_attrs(struct vm_page *, unsigned);
void		pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void		pmap_load(void);
paddr_t		pmap_init_tmp_pgtbl(paddr_t);
void		pmap_remove_all(struct pmap *);
void		pmap_ldt_sync(struct pmap *);
void		pmap_kremove_local(vaddr_t, vsize_t);

void		pmap_emap_enter(vaddr_t, paddr_t, vm_prot_t);
void		pmap_emap_remove(vaddr_t, vsize_t);
void		pmap_emap_sync(bool);

#define __HAVE_PMAP_PV_TRACK	1
void		pmap_pv_init(void);
void		pmap_pv_track(paddr_t, psize_t);
void		pmap_pv_untrack(paddr_t, psize_t);

void		pmap_map_ptes(struct pmap *, struct pmap **, pd_entry_t **,
		    pd_entry_t * const **);
void		pmap_unmap_ptes(struct pmap *, struct pmap *);

int		pmap_pdes_invalid(vaddr_t, pd_entry_t * const *, pd_entry_t *);

u_int		x86_mmap_flags(paddr_t);

bool		pmap_is_curpmap(struct pmap *);

vaddr_t		reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */

typedef enum tlbwhy {
	TLBSHOOT_APTE,
	TLBSHOOT_KENTER,
	TLBSHOOT_KREMOVE,
	TLBSHOOT_FREE_PTP1,
	TLBSHOOT_FREE_PTP2,
	TLBSHOOT_REMOVE_PTE,
	TLBSHOOT_REMOVE_PTES,
	TLBSHOOT_SYNC_PV1,
	TLBSHOOT_SYNC_PV2,
	TLBSHOOT_WRITE_PROTECT,
	TLBSHOOT_ENTER,
	TLBSHOOT_UPDATE,
	TLBSHOOT_BUS_DMA,
	TLBSHOOT_BUS_SPACE,
	TLBSHOOT__MAX,
} tlbwhy_t;

void		pmap_tlb_init(void);
void		pmap_tlb_cpu_init(struct cpu_info *);
void		pmap_tlb_shootdown(pmap_t, vaddr_t, pt_entry_t, tlbwhy_t);
void		pmap_tlb_shootnow(void);
void		pmap_tlb_intr(void);
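
/*
 * Usage sketch (illustrative, not a definitive reference): after changing
 * a PTE, callers queue a shootdown tagged with the reason from tlbwhy_t
 * and then force it out:
 *
 *	pmap_tlb_shootdown(pmap, va, opte, TLBSHOOT_UPDATE);
 *	pmap_tlb_shootnow();
 */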

#define __HAVE_PMAP_EMAP

#define PMAP_GROWKERNEL	/* turn on pmap_growkernel interface */
#define PMAP_FORK	/* turn on pmap_fork interface */

/*
 * Do idle page zeroing uncached to avoid polluting the cache.
 */
bool	pmap_pageidlezero(paddr_t);
#define PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * inline functions
 */

__inline static bool __unused
pmap_pdes_valid(vaddr_t va, pd_entry_t * const *pdes, pd_entry_t *lastpde)
{
	return pmap_pdes_invalid(va, pdes, lastpde) == 0;
}

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 * if hardware doesn't support one-page flushing)
 */

__inline static void __unused
pmap_update_pg(vaddr_t va)
{
	invlpg(va);
}

/*
 * pmap_update_2pg: flush two pages from the TLB
 */

__inline static void __unused
pmap_update_2pg(vaddr_t va, vaddr_t vb)
{
	invlpg(va);
	invlpg(vb);
}

/*
 * pmap_page_protect: change the protection of all recorded mappings
 * of a managed page
 *
 * => this function is a frontend for pmap_page_remove/pmap_clear_attrs
 * => we only have to worry about making the page more protected.
 *    unprotecting a page is done on-demand at fault time.
 */

__inline static void __unused
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_clear_attrs(pg, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}

/*
 * pmap_pv_protect: change the protection of all recorded mappings
 * of an unmanaged page
 */

__inline static void __unused
pmap_pv_protect(paddr_t pa, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_pv_clear_attrs(pa, PG_RW);
		} else {
			pmap_pv_remove(pa);
		}
	}
}

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only have to worry about making the page more protected.
 *    unprotecting a page is done on-demand at fault time.
 */

__inline static void __unused
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}

/*
 * various address inlines
 *
 *  vtopte: return a pointer to the PTE mapping a VA, works only for
 *  user and PT addresses
 *
 *  kvtopte: return a pointer to the PTE mapping a kernel VA
 */

#include <lib/libkern/libkern.h>

static __inline pt_entry_t * __unused
vtopte(vaddr_t va)
{

	KASSERT(va < VM_MIN_KERNEL_ADDRESS);

	return (PTE_BASE + pl1_i(va));
}

static __inline pt_entry_t * __unused
kvtopte(vaddr_t va)
{
	pd_entry_t *pde;

	KASSERT(va >= VM_MIN_KERNEL_ADDRESS);

	pde = L2_BASE + pl2_i(va);
	if (*pde & PG_PS)
		return ((pt_entry_t *)pde);

	return (PTE_BASE + pl1_i(va));
}
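
/*
 * Sketch (illustrative, not part of the API): ignoring superpages, a
 * kernel VA can be translated by hand through kvtopte():
 *
 *	paddr_t pa = (*kvtopte(va) & PG_FRAME) | (va & PAGE_MASK);
 *
 * vtophys() below is the supported way to do this.
 */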

paddr_t vtophys(vaddr_t);
vaddr_t	pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t);
void	pmap_cpu_init_late(struct cpu_info *);
bool	sse2_idlezero_page(void *);

#ifdef XEN
#include <sys/bitops.h>

#define XPTE_MASK	L1_FRAME
/* Selects the index of a PTE in (A)PTE_BASE */
#define XPTE_SHIFT	(L1_SHIFT - ilog2(sizeof(pt_entry_t)))

/* PTE access inline functions */

/*
 * Get the machine address of the PTE that is pointed to.  We look the
 * value up through the hardware MMU (the recursive PTE area), so this
 * works only for levels 1-3.
 */

static __inline paddr_t
xpmap_ptetomach(pt_entry_t *pte)
{
	pt_entry_t *up_pte;
	vaddr_t va = (vaddr_t) pte;

	va = ((va & XPTE_MASK) >> XPTE_SHIFT) | (vaddr_t) PTE_BASE;
	up_pte = (pt_entry_t *) va;

	return (paddr_t) (((*up_pte) & PG_FRAME) +
	    (((vaddr_t) pte) & (~PG_FRAME & ~VA_SIGN_MASK)));
}

/* Xen helpers to change bits of a pte */
#define XPMAP_UPDATE_DIRECT	1	/* Update direct map entry flags too */

paddr_t	vtomach(vaddr_t);
#define vtomfn(va) (vtomach(va) >> PAGE_SHIFT)
#endif	/* XEN */

/* pmap functions with machine addresses */
void	pmap_kenter_ma(vaddr_t, paddr_t, vm_prot_t, u_int);
int	pmap_enter_ma(struct pmap *, vaddr_t, paddr_t, paddr_t,
	    vm_prot_t, u_int, int);
bool	pmap_extract_ma(pmap_t, vaddr_t, paddr_t *);

/*
 * Hooks for the pool allocator.
 */
#define POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))

#ifdef __HAVE_DIRECT_MAP

#define L4_SLOT_DIRECT		509
#define PDIR_SLOT_DIRECT	L4_SLOT_DIRECT

#define PMAP_DIRECT_BASE	(VA_SIGN_NEG((L4_SLOT_DIRECT * NBPD_L4)))
#define PMAP_DIRECT_END		(VA_SIGN_NEG(((L4_SLOT_DIRECT + 1) * NBPD_L4)))

#define PMAP_DIRECT_MAP(pa)	((vaddr_t)PMAP_DIRECT_BASE + (pa))
#define PMAP_DIRECT_UNMAP(va)	((paddr_t)(va) - PMAP_DIRECT_BASE)

/*
 * Alternate mapping hooks for pool pages.
 */
#define PMAP_MAP_POOLPAGE(pa)	PMAP_DIRECT_MAP((pa))
#define PMAP_UNMAP_POOLPAGE(va)	PMAP_DIRECT_UNMAP((va))
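
/*
 * Example (illustrative only): with the direct map every physical page is
 * already mapped, so a pool page round-trips without any pmap_kenter:
 *
 *	vaddr_t va = PMAP_MAP_POOLPAGE(pa);
 *	memset((void *)va, 0, PAGE_SIZE);
 *	KASSERT(PMAP_UNMAP_POOLPAGE(va) == pa);
 */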

void	pagezero(vaddr_t);

#endif /* __HAVE_DIRECT_MAP */

#endif /* _KERNEL */

#endif /* _X86_PMAP_H_ */