/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves.  This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 * from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 * from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD: src/sys/i386/include/pmap.h,v 1.65.2.3 2001/10/03 07:15:37 peter Exp $
 * $DragonFly: src/sys/platform/vkernel/include/pmap.h,v 1.4 2007/07/01 02:51:44 dillon Exp $
 */

#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_

#include <cpu/pmap.h>

/*
 * Pte related macros
 */
#define KVADDR(l4, l3, l2, l1) ( \
	((unsigned long)-1 << 47) | \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))

#define UVADDR(l4, l3, l2, l1) ( \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))

/*
 * Initial number of kernel page tables.  NKPT is now calculated in the
 * pmap code.
 *
 * Give NKPDPE a generous value, allowing the kernel to map up to 128G.
 */
#define NKPML4E		1		/* number of kernel PML4 slots */
#define NKPDPE		128		/* number of kernel PDP slots */

#define NUPML4E		(NPML4EPG/2)	/* number of userland PML4 pages */
#define NUPDPE		(NUPML4E*NPDPEPG) /* number of userland PDP pages */
#define NUPDE		(NUPDPE*NPDEPG)	/* number of userland PD entries */

#define NDMPML4E	1		/* number of dmap PML4 slots */
#define NDMPDPE		NPTEPG		/* number of dmap PDPE slots */

/*
 * The *PML4I values control the layout of virtual memory.
 */
#define PML4PML4I	(NPML4EPG/2)	/* Index of recursive pml4 mapping */


#ifndef LOCORE

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_QUEUE_H_
#include <sys/queue.h>
#endif
#ifndef _SYS_SPINLOCK_H_
#include <sys/spinlock.h>
#endif
#ifndef _SYS_THREAD_H_
#include <sys/thread.h>
#endif
#ifndef _SYS_VKERNEL_H_
#include <sys/vkernel.h>
#endif
#ifndef _MACHINE_TYPES_H_
#include <machine/types.h>
#endif
#ifndef _MACHINE_PARAM_H_
#include <machine/param.h>
#endif

#ifdef _KERNEL

vm_paddr_t pmap_kextract(vm_offset_t);

/*
 * XXX
 */
#define vtophys(va)	pmap_kextract(((vm_offset_t)(va)))
#define vtophys_pte(va)	((pt_entry_t)pmap_kextract(((vm_offset_t)(va))))

#endif

#define pte_load_clear(pte)	atomic_readandclear_long(pte)

/*
 * Pmap stuff
 */
struct pv_entry;
struct vm_page;
struct vm_object;

struct md_page {
	int pv_list_count;
	TAILQ_HEAD(,pv_entry)	pv_list;
};

struct md_object {
};

/*
 * Each machine dependent implementation is expected to
 * keep certain statistics.  They may do this any way they
 * so choose, but are expected to return the statistics
 * in the following structure.
 */
struct pmap_statistics {
	long resident_count;		/* # of pages mapped (total) */
	long wired_count;		/* # of pages wired */
};
typedef struct pmap_statistics *pmap_statistics_t;

struct pmap {
	pml4_entry_t		*pm_pml4;	/* KVA of level 4 page table */
	struct vm_page		*pm_pdirm;	/* VM page for pg directory */
	vpte_t			pm_pdirpte;	/* pte mapping phys page */
	struct vm_object	*pm_pteobj;	/* Container for pte's */
	TAILQ_ENTRY(pmap)	pm_pmnode;	/* list of pmaps */
	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
	TAILQ_HEAD(,pv_entry)	pm_pvlist_free;	/* free mappings */
	int			pm_count;	/* reference count */
	cpumask_t		pm_active;	/* active on cpus */
	vm_pindex_t		pm_pdindex;	/* page dir page in obj */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	struct vm_page		*pm_ptphint;	/* pmap ptp hint */
	int			pm_generation;	/* detect pvlist deletions */
	struct spinlock		pm_spin;
	struct lwkt_token	pm_token;
};

#define pmap_resident_count(pmap) (pmap)->pm_stats.resident_count

#define CPUMASK_LOCK		CPUMASK(SMP_MAXCPU)
#define CPUMASK_BIT		SMP_MAXCPU	/* 1 << SMP_MAXCPU */

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap;
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */
typedef struct pv_entry {
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_list;
	TAILQ_ENTRY(pv_entry)	pv_plist;
	struct vm_page	*pv_ptem;	/* VM page for pte */
} *pv_entry_t;

#ifdef _KERNEL

extern caddr_t	CADDR1;
extern pt_entry_t *CMAP1;
extern char *ptvmmap;		/* poor name! */
extern vm_offset_t clean_sva;
extern vm_offset_t clean_eva;

typedef struct vm_page *vm_page_t;
typedef char vm_memattr_t;

void	pmap_bootstrap(vm_paddr_t *, int64_t);
void	*pmap_mapdev(vm_paddr_t, vm_size_t);
void	pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
void	pmap_release(struct pmap *pmap);
void	pmap_interlock_wait(struct vmspace *);

struct vm_page *pmap_use_pt(pmap_t, vm_offset_t);

static __inline int
pmap_emulate_ad_bits(pmap_t pmap)
{
	return 0;
}

#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */
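/*
 * Illustrative sketch, not part of the original header: it shows how the
 * KVADDR()/UVADDR() macros above compose a canonical x86-64 virtual
 * address from the four page-table indices, and how the recursive
 * PML4PML4I slot makes the page tables themselves appear inside KVA.
 * Everything prefixed ex_/EX_ is hypothetical, and the shift constants
 * are the conventional amd64 values; the real ones come from
 * <cpu/pmap.h> and <machine/param.h>.  Guarded by #if 0 so it is never
 * compiled as part of this header; extract it to build it standalone.
 */
#if 0
#include <stdio.h>

#define EX_PAGE_SHIFT	12		/* 4KB pages */
#define EX_PDRSHIFT	21		/* 2MB per PD entry */
#define EX_PDPSHIFT	30		/* 1GB per PDP entry */
#define EX_PML4SHIFT	39		/* 512GB per PML4 entry */

/*
 * Same shape as KVADDR(): the ((unsigned long)-1 << 47) term fills bits
 * 63..47, i.e. sign-extends bit 47 to form a canonical kernel address.
 */
static unsigned long
ex_kvaddr(unsigned long l4, unsigned long l3, unsigned long l2, unsigned long l1)
{
	return (((unsigned long)-1 << 47) |
		(l4 << EX_PML4SHIFT) | (l3 << EX_PDPSHIFT) |
		(l2 << EX_PDRSHIFT) | (l1 << EX_PAGE_SHIFT));
}

int
main(void)
{
	/*
	 * With 512 8-byte entries per page, PML4PML4I is 512/2 = 256.
	 * Pointing that slot back at the PML4 page is the recursive map
	 * described at the top of this file; the address below is where
	 * the page tables become visible through that slot.
	 */
	printf("recursive map base: %#lx\n", ex_kvaddr(256, 0, 0, 0));
	/* Maximum indices select the very last 4KB page of the address space. */
	printf("last page:          %#lx\n", ex_kvaddr(511, 511, 511, 511));
	return (0);
}
#endif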
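/*
 * Illustrative sketch, not part of the original header: the pv_entry
 * structure above gives each physical page (via md_page) a tail queue
 * of all virtual mappings that currently point at it, so the pmap can
 * find and tear down every mapping of a page.  The minimal user-space
 * model below captures just that relationship with <sys/queue.h>; all
 * ex_ names are hypothetical stand-ins for the kernel structures.
 * Guarded by #if 0 so it is never compiled as part of this header.
 */
#if 0
#include <sys/queue.h>
#include <stdio.h>

struct ex_pv_entry {
	unsigned long			pv_va;	/* virtual address of mapping */
	TAILQ_ENTRY(ex_pv_entry)	pv_list;
};

struct ex_md_page {			/* per-physical-page bookkeeping */
	int				pv_list_count;
	TAILQ_HEAD(, ex_pv_entry)	pv_list;
};

/* Record that va now maps the page: the analogue of entering a pv_entry. */
static void
ex_pv_insert(struct ex_md_page *m, struct ex_pv_entry *pv, unsigned long va)
{
	pv->pv_va = va;
	TAILQ_INSERT_TAIL(&m->pv_list, pv, pv_list);
	m->pv_list_count++;
}

int
main(void)
{
	struct ex_md_page m = { 0, TAILQ_HEAD_INITIALIZER(m.pv_list) };
	struct ex_pv_entry a, b, *pv;

	/* The same physical page mapped at two different virtual addresses. */
	ex_pv_insert(&m, &a, 0x400000);
	ex_pv_insert(&m, &b, 0x7f0000000000);

	/* Walk every mapping of the page, as the pmap does when removing them all. */
	TAILQ_FOREACH(pv, &m.pv_list, pv_list)
		printf("page mapped at va %#lx (of %d mappings)\n",
		    pv->pv_va, m.pv_list_count);
	return (0);
}
#endif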