/*
 * Copyright (c) 1991 Regents of the University of California.
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 2008 The DragonFly Project.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves.  This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 * from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 * from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD: src/sys/i386/include/pmap.h,v 1.65.2.3 2001/10/03 07:15:37 peter Exp $
 */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

#include <cpu/pmap.h>

/*
 * Size of Kernel address space.  This is the number of page table pages
 * (2GB each) to use for the kernel.  256 pages == 512 Gigabytes.
 * This **MUST** be a multiple of 4 (eg: 252, 256, 260, etc).
 */
#ifndef KVA_PAGES
#define KVA_PAGES	256
#endif

/*
 * Pte related macros.  This is complicated by having to deal with
 * the sign extension of the 48th bit.
 */
#define KVADDR(l4, l3, l2, l1) ( \
	((unsigned long)-1 << 47) | \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))

#define UVADDR(l4, l3, l2, l1) ( \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))
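/*
 * Illustrative only: a worked expansion of KVADDR, assuming the usual
 * 4-level shift constants (PAGE_SHIFT=12, PDRSHIFT=21, PDPSHIFT=30,
 * PML4SHIFT=39).  With KPML4I=511 and KPDPI=510 (defined below):
 *
 *	KVADDR(511, 510, 0, 0)
 *	  = 0xFFFF800000000000		sign extension of bit 47
 *	  | (511UL << 39)		= 0x0000FF8000000000
 *	  | (510UL << 30)		= 0x0000007F80000000
 *	  = 0xFFFFFFFF80000000		i.e. kernbase at -2GB
 *
 * UVADDR() is the same expression minus the sign extension, so user
 * addresses stay in the low canonical half.
 */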
/*
 * NOTE: We no longer hardwire NKPT; it is calculated in create_pagetables().
 */
#define NKPML4E		1		/* number of kernel PML4 slots */
/* NKPDPE defined in vmparam.h */

/*
 * NUPDPs	512 (256 user)		number of PDPs in user page table
 * NUPDs	512 * 512		number of PDs in user page table
 * NUPTs	512 * 512 * 512		number of PTs in user page table
 * NUPTEs	512 * 512 * 512 * 512	number of PTEs in user page table
 *
 * NUPDP_USER	number of PDPs reserved for userland
 * NUPTE_USER	number of PTEs reserved for userland (big number)
 */
#define NUPDP_USER	(NPML4EPG/2)
#define NUPDP_TOTAL	(NPML4EPG)
#define NUPD_TOTAL	(NPDPEPG * NUPDP_TOTAL)
#define NUPT_TOTAL	(NPDEPG * NUPD_TOTAL)
#define NUPTE_TOTAL	((vm_pindex_t)NPTEPG * NUPT_TOTAL)
#define NUPTE_USER	((vm_pindex_t)NPTEPG * NPDEPG * NPDPEPG * NUPDP_USER)

/*
 * Number of 512G dmap PML4 slots (the maximum is ~254 or so, but don't
 * go over 64, which gives us 32TB of ram).  Because we cache free,
 * empty pmaps the initialization overhead is minimal.
 *
 * It should be possible to bump this up to 255 (but not 256), which would
 * be able to address a maximum of ~127TB of physical ram.
 */
#define NDMPML4E	64

/*
 * The *PML4I values control the layout of virtual memory.  Each PML4
 * entry represents 512G.
 */
#define PML4PML4I	(NPML4EPG/2)	/* Index of recursive pml4 mapping */

#define KPML4I		(NPML4EPG-1)	/* Top 512GB for KVM */
#define DMPML4I		(KPML4I-NDMPML4E) /* Next 512GBxN down for dmap */

/*
 * The location of KERNBASE in the last PD of the kernel's KVM (KPML4I)
 * space.  Each PD represents 1GB.  The kernel must be placed here
 * for the compile/link options to work properly so absolute 32-bit
 * addressing can be used to access stuff.
 */
#define KPDPI		(NPDPEPG-2)	/* kernbase at -2GB */

/*
 * The per-CPU data assumes ~64K x SMP_MAXCPU, i.e. up to 256 cpus in
 * the future, or 16MB of space.  Each PDE maps 2MB, so use NPDEPG-8 to
 * place the per-CPU data (8 * 2MB = 16MB).
 */
#define MPPML4I		KPML4I
#define MPPDPI		KPDPI
#define MPPTDI		(NPDEPG-8)
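/*
 * Illustrative only, assuming 512-entry page tables at every level:
 * the dmap spans NDMPML4E * 512GB = 64 * 512GB = 32TB, from
 * KVADDR(DMPML4I, 0, 0, 0) up to KVADDR(KPML4I, 0, 0, 0), and the
 * per-CPU area occupies the top 8 PDEs (8 * 2MB = 16MB) of the
 * kernbase PD:
 *
 *	KVADDR(MPPML4I, MPPDPI, MPPTDI, 0)
 *	  = kernbase + 504 * 2MB
 *	  = 0xFFFFFFFF80000000 + 0x3F000000
 *	  = 0xFFFFFFFFBF000000
 */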
/*
 * XXX doesn't really belong here I guess...
 */
#define ISA_HOLE_START	0xa0000
#define ISA_HOLE_LENGTH	(0x100000-ISA_HOLE_START)

#ifndef LOCORE

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_QUEUE_H_
#include <sys/queue.h>
#endif
#ifndef _SYS_TREE_H_
#include <sys/tree.h>
#endif
#ifndef _SYS_SPINLOCK_H_
#include <sys/spinlock.h>
#endif
#ifndef _SYS_THREAD_H_
#include <sys/thread.h>
#endif
#ifndef _MACHINE_TYPES_H_
#include <machine/types.h>
#endif
#ifndef _MACHINE_PARAM_H_
#include <machine/param.h>
#endif

/*
 * Address of current and alternate address space page table maps
 * and directories.
 */
#ifdef _KERNEL
#define addr_PTmap	(KVADDR(PML4PML4I, 0, 0, 0))
#define addr_PDmap	(KVADDR(PML4PML4I, PML4PML4I, 0, 0))
#define addr_PDPmap	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, 0))
#define addr_PML4map	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, PML4PML4I))
#define addr_PML4pml4e	(addr_PML4map + (PML4PML4I * sizeof(pml4_entry_t)))
#define PTmap		((pt_entry_t *)(addr_PTmap))
#define PDmap		((pd_entry_t *)(addr_PDmap))
#define PDPmap		((pd_entry_t *)(addr_PDPmap))
#define PML4map		((pd_entry_t *)(addr_PML4map))
#define PML4pml4e	((pd_entry_t *)(addr_PML4pml4e))

extern u_int64_t KPML4phys;	/* physical address of kernel level 4 */
#endif
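/*
 * Illustrative only: because of the recursive slot, the pte mapping any
 * canonical virtual address can be looked up directly through PTmap.
 * A minimal sketch of the idea; vtopte_demo is hypothetical and not
 * part of this API:
 *
 *	static __inline pt_entry_t *
 *	vtopte_demo(vm_offset_t va)
 *	{
 *		uint64_t mask = ((1UL << (9 + 9 + 9 + 9)) - 1);
 *
 *		return (PTmap + ((va >> PAGE_SHIFT) & mask));
 *	}
 *
 * The 36-bit mask keeps the four 9-bit page table indexes and discards
 * the sign-extension bits of the original address.
 */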
#ifdef _KERNEL

/*
 * XXX
 */
#define vtophys(va)	pmap_kextract(((vm_offset_t)(va)))
#define vtophys_pte(va)	((pt_entry_t)pmap_kextract(((vm_offset_t)(va))))

#endif

#define pte_load_clear(pte)	atomic_readandclear_long(pte)

static __inline void
pte_store(pt_entry_t *ptep, pt_entry_t pte)
{
	*ptep = pte;
}

#define pde_store(pdep, pde)	pte_store((pdep), (pde))

/*
 * Pmap stuff
 */
struct pmap;
struct pv_entry;
struct vm_page;
struct vm_object;
struct vmspace;

TAILQ_HEAD(md_page_pv_list, pv_entry);
/*
 * vm_page structures embed a list of related pv_entry's
 */
struct md_page {
	struct md_page_pv_list pv_list;
};

/*
 * vm_object's representing large mappings can contain embedded pmaps
 * to organize sharing at higher page table levels for PROT_READ and
 * PROT_READ|PROT_WRITE maps.
 */
struct md_object {
	struct pmap *pmap_rw;
	struct pmap *pmap_ro;
};

/*
 * Each machine dependent implementation is expected to
 * keep certain statistics.  They may do this any way they
 * so choose, but are expected to return the statistics
 * in the following structure.
 *
 * NOTE: We try to match the size of the pc32 pmap with the vkernel pmap
 * so the same utilities (like 'ps') can be used on both.
 */
struct pmap_statistics {
	long resident_count;	/* # of pages mapped (total) */
	long wired_count;	/* # of pages wired */
};
typedef struct pmap_statistics *pmap_statistics_t;

struct pv_entry_rb_tree;
RB_PROTOTYPE2(pv_entry_rb_tree, pv_entry, pv_entry,
	      pv_entry_compare, vm_pindex_t);

/* Types of PMAP (regular, EPT Intel, NPT Amd) */
#define REGULAR_PMAP		0
#define EPT_PMAP		1

/* Bit indexes in pmap_bits */
#define TYPE_IDX		0
#define PG_V_IDX		1
#define PG_RW_IDX		2
#define PG_U_IDX		3
#define PG_A_IDX		4
#define PG_M_IDX		5
#define PG_PS_IDX		6
#define PG_G_IDX		7
#define PG_W_IDX		8
#define PG_MANAGED_IDX		9
#define PG_DEVICE_IDX		10
#define PG_N_IDX		11
#define PG_BITS_SIZE		12

#define PROTECTION_CODES_SIZE	8
#define PAT_INDEX_SIZE		8

struct pmap {
	pml4_entry_t		*pm_pml4;	/* KVA of level 4 page table */
	struct pv_entry		*pm_pmlpv;	/* PV entry for pml4 */
	TAILQ_ENTRY(pmap)	pm_pmnode;	/* list of pmaps */
	RB_HEAD(pv_entry_rb_tree, pv_entry) pm_pvroot;
	int			pm_count;	/* reference count */
	cpumask_t		pm_active;	/* active on cpus */
	int			pm_flags;
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	struct pv_entry		*pm_pvhint;	/* pv_entry lookup hint */
	int			pm_generation;	/* detect pvlist deletions */
	struct spinlock		pm_spin;
	struct lwkt_token	pm_token;
	long			pm_invgen;
	uint64_t		pmap_bits[PG_BITS_SIZE];
	int			protection_codes[PROTECTION_CODES_SIZE];
	pt_entry_t		pmap_cache_bits[PAT_INDEX_SIZE];
	pt_entry_t		pmap_cache_mask;
	int (*copyinstr)(const void *, void *, size_t, size_t *);
	int (*copyin)(const void *, void *, size_t);
	int (*copyout)(const void *, void *, size_t);
	int (*fubyte)(const void *);
	int (*subyte)(void *, int);
	long (*fuword)(const void *);
	int (*suword)(void *, long);
	int (*suword32)(void *, int);
};

#define CPUMASK_LOCK		CPUMASK(SMP_MAXCPU)
#define CPUMASK_BIT		SMP_MAXCPU	/* for 1LLU << SMP_MAXCPU */

#define PMAP_FLAG_SIMPLE	0x00000001
#define PMAP_EMULATE_AD_BITS	0x00000002

#define pmap_resident_count(pmap) (pmap)->pm_stats.resident_count

typedef struct pmap *pmap_t;

#ifdef _KERNEL
extern struct pmap kernel_pmap;
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */
typedef struct pv_entry {
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	vm_pindex_t	pv_pindex;	/* PTE, PT, PD, PDP, or PML4 */
	TAILQ_ENTRY(pv_entry) pv_list;
	RB_ENTRY(pv_entry) pv_entry;
	struct vm_page	*pv_m;		/* page being mapped */
	u_int		pv_hold;	/* interlock action */
	u_int		pv_flags;
#ifdef PMAP_DEBUG
	const char	*pv_func;
	int		pv_line;
#endif
} *pv_entry_t;

#define PV_HOLD_LOCKED		0x80000000U
#define PV_HOLD_WAITING		0x40000000U
#define PV_HOLD_UNUSED2000	0x20000000U
#define PV_HOLD_MASK		0x1FFFFFFFU

#define PV_FLAG_VMOBJECT	0x00000001U	/* shared pt in VM obj */
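/*
 * Illustrative only: pv_hold packs a reference count (PV_HOLD_MASK)
 * together with the lock/wait bits, so a pv_entry can be held and
 * locked with a single atomic op.  A minimal sketch of the idea, not
 * the kernel's actual locking code:
 *
 *	u_int count = pv->pv_hold;
 *
 *	if ((count & PV_HOLD_LOCKED) == 0 &&
 *	    atomic_cmpset_int(&pv->pv_hold, count,
 *			      (count + 1) | PV_HOLD_LOCKED)) {
 *		...	pv is now held and locked
 *	}
 *
 * A contending thread would set PV_HOLD_WAITING so the holder knows to
 * wake it when the lock is released.
 */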
#ifdef _KERNEL

extern caddr_t	CADDR1;
extern pt_entry_t *CMAP1;
extern vm_paddr_t dump_avail[];
extern vm_paddr_t avail_end;
extern vm_paddr_t avail_start;
extern vm_offset_t clean_eva;
extern vm_offset_t clean_sva;
extern char	*ptvmmap;		/* poor name! */

typedef struct vm_page *vm_page_t;
typedef char vm_memattr_t;

void	pmap_release(struct pmap *pmap);
void	pmap_interlock_wait(struct vmspace *);
void	pmap_bootstrap(vm_paddr_t *);
void	*pmap_mapbios(vm_paddr_t, vm_size_t);
void	*pmap_mapdev(vm_paddr_t, vm_size_t);
void	*pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
void	*pmap_mapdev_uncacheable(vm_paddr_t, vm_size_t);
void	pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
struct vm_page *pmap_use_pt(pmap_t, vm_offset_t);
void	pmap_set_opt(void);
void	pmap_init_pat(void);
vm_paddr_t pmap_kextract(vm_offset_t);
void	pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void	pmap_invalidate_cache_pages(vm_page_t *pages, int count);
void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);

static __inline int
pmap_emulate_ad_bits(pmap_t pmap)
{
	return (pmap->pm_flags & PMAP_EMULATE_AD_BITS);
}

#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */