/*
 * Copyright (c) 1991 Regents of the University of California.
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 2008 The DragonFly Project.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from the hp300 version by Mike Hibler; this version by William
 * Jolitz uses a recursive map (a PML4 entry points back at the PML4 page
 * itself) to map the page tables using the page tables themselves.  This
 * is done to reduce the impact on kernel virtual memory of lots of sparse
 * address space, and to reduce the cost of memory to each process.
 *
 * from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 * from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD: src/sys/i386/include/pmap.h,v 1.65.2.3 2001/10/03 07:15:37 peter Exp $
 */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

#include <cpu/pmap.h>

/*
 * PTE-related macros.  This is complicated by having to deal with the
 * sign extension of bit 47 (the 48th bit) required for canonical
 * x86-64 addresses.
 */
#define KVADDR(l4, l3, l2, l1) ( \
	((unsigned long)-1 << 47) | \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))

#define UVADDR(l4, l3, l2, l1) ( \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))
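
/*
 * Worked example (illustrative only): with the standard x86-64 4-level
 * paging shifts from <cpu/pmap.h> (PML4SHIFT 39, PDPSHIFT 30, PDRSHIFT 21,
 * PAGE_SHIFT 12), KVADDR(256, 0, 0, 0) evaluates to
 *
 *	  0xffff800000000000	(sign extension of bit 47)
 *	| 0x0000800000000000	(256UL << 39)
 *	= 0xffff800000000000
 *
 * which is the lowest canonical address in the kernel half.  UVADDR() is
 * the same composition without the sign extension, so UVADDR(0, 0, 0, 1)
 * is simply 0x0000000000001000.
 */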

/*
 * NKPML4E is the number of PML4E slots used for KVM.  Each slot represents
 * 512GB of KVM.  A number between 1 and 128 may be specified.  To support
 * the maximum machine configuration of 64TB we recommend around
 * 16 slots (8TB of KVM).
 *
 * NOTE: NKPT is no longer hardwired; it is calculated in
 *	 create_pagetables().
 */
#define NKPML4E		16
/* NKPDPE defined in vmparam.h */

/*
 * NUPDPs	512 (256 user)		number of PDPs in user page table
 * NUPDs	512 * 512		number of PDs in user page table
 * NUPTs	512 * 512 * 512		number of PTs in user page table
 * NUPTEs	512 * 512 * 512 * 512	number of PTEs in user page table
 *
 * NUPDP_USER	number of PDPs reserved for userland
 * NUPTE_USER	number of PTEs reserved for userland (big number)
 */
#define	NUPDP_USER	(NPML4EPG/2)
#define	NUPDP_TOTAL	(NPML4EPG)
#define	NUPD_TOTAL	(NPDPEPG * NUPDP_TOTAL)
#define	NUPT_TOTAL	(NPDEPG * NUPD_TOTAL)
#define NUPTE_TOTAL	((vm_pindex_t)NPTEPG * NUPT_TOTAL)
#define NUPTE_USER	((vm_pindex_t)NPTEPG * NPDEPG * NPDPEPG * NUPDP_USER)

/*
 * Number of 512G DMAP PML4 slots.  There are 512 slots of which 256 are
 * used by the kernel.  Of those 256 we allow up to 128 to be used by the
 * DMAP (for 64TB of RAM), leaving 128 for the kernel and other incidentals.
 */
#define	NDMPML4E	128

/*
 * The *PML4I values control the layout of virtual memory.  Each PML4
 * entry represents 512G.
 */
#define	PML4PML4I	(NPML4EPG/2)	/* Index of recursive pml4 mapping */

#define	KPML4I		(NPML4EPG-NKPML4E) /* Start of KVM */
#define	DMPML4I		(KPML4I-NDMPML4E)  /* Next 512GBxN down for DMAP */
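
/*
 * Worked layout example (illustrative only, assuming NPML4EPG = 512):
 *
 *	PML4PML4I = 512/2	= 256	recursive pml4 slot
 *	KPML4I	  = 512 - 16	= 496	first KVM slot
 *	DMPML4I	  = 496 - 128	= 368	first DMAP slot
 *
 * so with KVADDR() above, the DMAP begins at KVADDR(368, 0, 0, 0) =
 * 0xffffb80000000000 and KVM begins at KVADDR(496, 0, 0, 0) =
 * 0xfffff80000000000.
 */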

/*
 * Make sure the kernel map and DMAP don't overflow the 256 PML4 entries
 * available to the kernel half of the address space.  Minus one for the
 * PML4PML4I.
 */
#if NKPML4E + NDMPML4E >= 255
#error "NKPML4E or NDMPML4E is too large"
#endif

/*
 * The location of KERNBASE in the last PD of the kernel's KVM (KPML4I)
 * space.  Each PD represents 1GB.  The kernel must be placed here
 * for the compile/link options to work properly so absolute 32-bit
 * addressing can be used to access kernel text and data.
 */
#define	KPDPI		(NPDPEPG-2)	/* kernbase at -2GB */

/*
 * Per-CPU data assumes ~64KB per cpu times SMP_MAXCPU, say up to 256
 * cpus in the future, or 16MB of space.  Each PD entry represents 2MB,
 * so use NPDEPG-8 to place the per-CPU data.
 */
#define	MPPML4I		(KPML4I + NKPML4E - 1)
#define	MPPDPI		KPDPI
#define	MPPTDI		(NPDEPG-8)

/*
 * XXX doesn't really belong here I guess...
 */
#define ISA_HOLE_START	0xa0000
#define ISA_HOLE_LENGTH (0x100000-ISA_HOLE_START)

#ifndef LOCORE

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_CPUMASK_H_
#include <sys/cpumask.h>
#endif
#ifndef _SYS_QUEUE_H_
#include <sys/queue.h>
#endif
#ifndef _SYS_TREE_H_
#include <sys/tree.h>
#endif
#ifndef _SYS_SPINLOCK_H_
#include <sys/spinlock.h>
#endif
#ifndef _SYS_THREAD_H_
#include <sys/thread.h>
#endif
#ifndef _MACHINE_TYPES_H_
#include <machine/types.h>
#endif
#ifndef _MACHINE_PARAM_H_
#include <machine/param.h>
#endif

/*
 * Address of current and alternate address space page table maps
 * and directories.
 */
#ifdef _KERNEL
#define	addr_PTmap	(KVADDR(PML4PML4I, 0, 0, 0))
#define	addr_PDmap	(KVADDR(PML4PML4I, PML4PML4I, 0, 0))
#define	addr_PDPmap	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, 0))
#define	addr_PML4map	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, PML4PML4I))
#define	addr_PML4pml4e	(addr_PML4map + (PML4PML4I * sizeof(pml4_entry_t)))
#define	PTmap		((pt_entry_t *)(addr_PTmap))
#define	PDmap		((pd_entry_t *)(addr_PDmap))
#define	PDPmap		((pd_entry_t *)(addr_PDPmap))
#define	PML4map		((pd_entry_t *)(addr_PML4map))
#define	PML4pml4e	((pd_entry_t *)(addr_PML4pml4e))

extern uint64_t KPDPphys;	/* physical address of kernel level 3 */
extern uint64_t KPML4phys;	/* physical address of kernel level 4 */
#endif
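
#ifdef _KERNEL
/*
 * Sketch of how the recursive slot is typically consumed (illustrative;
 * the kernel's real helper of this shape lives in pmap.c, and the name
 * pmap_example_vtopte is hypothetical): PTmap behaves as a flat array of
 * PTEs indexed by the 36 VA bits above PAGE_SHIFT, so the PTE governing
 * any mapped kernel va can be read directly.
 */
static __inline pt_entry_t *
pmap_example_vtopte(vm_offset_t va)
{
	/* 9 bits per level x 4 levels = 36 index bits */
	uint64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT +
				  NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);

	return (PTmap + ((va >> PAGE_SHIFT) & mask));
}
#endif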

#ifdef _KERNEL

/*
 * XXX
 */
#define vtophys(va)	pmap_kextract(((vm_offset_t)(va)))
#define vtophys_pte(va)	((pt_entry_t)pmap_kextract(((vm_offset_t)(va))))

#endif

#define	pte_load_clear(pte)	atomic_readandclear_long(pte)

/*
 * Pmap stuff
 */
struct pmap;
struct pv_entry;
struct vm_page;
struct vm_object;
struct vmspace;

/*
 * vm_page structure extension for pmap.  Tracks the number of pmap
 * mappings for a managed page.  Unmanaged pages do not use this field.
 */
struct md_page {
	long interlock_count;
	long writeable_count_unused;
};

#define MD_PAGE_FREEABLE(m)	\
	(((m)->flags & (PG_MAPPED | PG_WRITEABLE)) == 0)

/*
 * vm_object's representing large mappings can contain embedded pmaps
 * to organize sharing at higher page table levels for PROT_READ and
 * PROT_READ|PROT_WRITE maps.
 */
struct md_object {
	void *dummy_unused;
};

/*
 * Each machine-dependent implementation is expected to keep certain
 * statistics.  It may do this any way it chooses, but it is expected
 * to return the statistics in the following structure.
 *
 * NOTE: We try to match the size of the pc32 pmap with the vkernel pmap
 *	 so the same utilities (like 'ps') can be used on both.
 */
struct pmap_statistics {
	long resident_count;	/* # of pages mapped (total) */
	long wired_count;	/* # of pages wired */
};
typedef struct pmap_statistics *pmap_statistics_t;

struct pv_entry_rb_tree;
RB_PROTOTYPE2(pv_entry_rb_tree, pv_entry, pv_entry,
	      pv_entry_compare, vm_pindex_t);

/* Types of pmap (regular, Intel EPT, AMD NPT) */
#define	REGULAR_PMAP		0
#define	EPT_PMAP		1

/* Bit indexes in pmap_bits */
#define	TYPE_IDX		0
#define	PG_V_IDX		1
#define	PG_RW_IDX		2
#define	PG_U_IDX		3
#define	PG_A_IDX		4
#define	PG_M_IDX		5
#define	PG_PS_IDX		6
#define	PG_G_IDX		7
#define	PG_W_IDX		8
#define	PG_MANAGED_IDX		9
#define	PG_UNUSED10_IDX		10
#define	PG_N_IDX		11
#define	PG_NX_IDX		12
#define	PG_BITS_SIZE		13

#define PROTECTION_CODES_SIZE	8
#define PAT_INDEX_SIZE		8

#define	PM_PLACEMARKS		64		/* 16 @ 4 zones */
#define	PM_NOPLACEMARK		((vm_pindex_t)-1)
#define	PM_PLACEMARK_WAKEUP	((vm_pindex_t)0x8000000000000000LLU)

struct pmap {
	pml4_entry_t		*pm_pml4;	/* KVA of level 4 page table */
	pml4_entry_t		*pm_pml4_iso;	/* (isolated version) */
	struct pv_entry		*pm_pmlpv;	/* PV entry for pml4 */
	struct pv_entry		*pm_pmlpv_iso;	/* (isolated version) */
	TAILQ_ENTRY(pmap)	pm_pmnode;	/* list of pmaps */
	RB_HEAD(pv_entry_rb_tree, pv_entry) pm_pvroot;
	int			pm_count;	/* reference count */
	cpulock_t		pm_active_lock;	/* interlock */
	cpumask_t		pm_active;	/* active on cpus */
	int			pm_flags;
	uint32_t		pm_softhold;
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	struct spinlock		pm_spin;
	struct pv_entry		*pm_pvhint_pt;	/* pv_entry lookup hint */
	struct pv_entry		*pm_pvhint_unused;
	vm_pindex_t		pm_placemarks[PM_PLACEMARKS];
	long			pm_invgen;
	uint64_t		pmap_bits[PG_BITS_SIZE];
	uint64_t		protection_codes[PROTECTION_CODES_SIZE];
	pt_entry_t		pmap_cache_bits_pte[PAT_INDEX_SIZE];
	pt_entry_t		pmap_cache_bits_pde[PAT_INDEX_SIZE];
	pt_entry_t		pmap_cache_mask_pte;
	pt_entry_t		pmap_cache_mask_pde;
	int (*copyinstr)(const void *, void *, size_t, size_t *);
	int (*copyin)(const void *, void *, size_t);
	int (*copyout)(const void *, void *, size_t);
	int (*fubyte)(const uint8_t *);		/* returns int for -1 err */
	int (*subyte)(uint8_t *, uint8_t);
	int32_t (*fuword32)(const uint32_t *);
	int64_t (*fuword64)(const uint64_t *);
	int (*suword64)(uint64_t *, uint64_t);
	int (*suword32)(uint32_t *, int);
	uint32_t (*swapu32)(volatile uint32_t *, uint32_t v);
	uint64_t (*swapu64)(volatile uint64_t *, uint64_t v);
	uint32_t (*fuwordadd32)(volatile uint32_t *, uint32_t v);
	uint64_t (*fuwordadd64)(volatile uint64_t *, uint64_t v);
};

#define	PMAP_FLAG_SIMPLE	0x00000001
#define	PMAP_EMULATE_AD_BITS	0x00000002
#define	PMAP_HVM		0x00000004
#define	PMAP_SEGSHARED		0x00000008	/* segment shared opt */
#define	PMAP_MULTI		0x00000010	/* multi-threaded use */

#define pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define pmap_resident_tlnw_count(pmap)	((pmap)->pm_stats.resident_count - \
					 (pmap)->pm_stats.wired_count)

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap;
#endif
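
/*
 * The pmap_bits[] and protection_codes[] tables exist because EPT pmaps
 * encode bits such as valid and read/write differently from regular
 * pmaps.  A minimal sketch of the intended access pattern follows; the
 * helper name pmap_example_pte_valid is hypothetical, for illustration
 * only.
 */
static __inline int
pmap_example_pte_valid(const struct pmap *pmap, pt_entry_t pte)
{
	/* Test PG_V via the per-pmap bit table instead of a hard-coded bit */
	return ((pte & pmap->pmap_bits[PG_V_IDX]) != 0);
}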

/*
 * The pv_entry structure is used to track higher levels of the page table.
 * The leaf PTE is no longer tracked with this structure.
 */
typedef struct pv_entry {
	pmap_t	pv_pmap;	/* pmap where mapping lies */
	vm_pindex_t pv_pindex;	/* PTE, PT, PD, PDP, or PML4 */
	RB_ENTRY(pv_entry) pv_entry;
	struct vm_page *pv_m;	/* page being mapped */
	u_int	pv_hold;	/* interlock action */
	u_int	pv_flags;
#ifdef PMAP_DEBUG
	const char *pv_func;
	int	pv_line;
	const char *pv_func_lastfree;
	int	pv_line_lastfree;
#endif
} *pv_entry_t;

#define	PV_HOLD_LOCKED		0x80000000U
#define	PV_HOLD_WAITING		0x40000000U
#define	PV_HOLD_UNUSED2000	0x20000000U
#define	PV_HOLD_MASK		0x1FFFFFFFU

#define	PV_FLAG_UNUSED01	0x00000001U
#define	PV_FLAG_UNUSED02	0x00000002U

#ifdef _KERNEL

extern caddr_t	CADDR1;
extern pt_entry_t *CMAP1;
extern vm_paddr_t avail_end;
extern vm_paddr_t avail_start;
extern vm_offset_t clean_eva;
extern vm_offset_t clean_sva;
extern char	*ptvmmap;	/* poor name! */

#ifndef __VM_PAGE_T_DEFINED__
#define __VM_PAGE_T_DEFINED__
typedef struct vm_page *vm_page_t;
#endif
#ifndef __VM_MEMATTR_T_DEFINED__
#define __VM_MEMATTR_T_DEFINED__
typedef char vm_memattr_t;
#endif

void	pmap_release(struct pmap *pmap);
void	pmap_interlock_wait (struct vmspace *);
void	pmap_bootstrap (vm_paddr_t *);
void	*pmap_mapbios(vm_paddr_t, vm_size_t);
void	*pmap_mapdev (vm_paddr_t, vm_size_t);
void	*pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
void	*pmap_mapdev_uncacheable(vm_paddr_t, vm_size_t);
void	pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
void	pmap_unmapdev (vm_offset_t, vm_size_t);
struct vm_page *pmap_use_pt (pmap_t, vm_offset_t);
void	pmap_set_opt (void);
void	pmap_init_pat(void);
void	pmap_invalidate_cache_pages(vm_page_t *pages, int count);
void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);

static __inline int
pmap_emulate_ad_bits(pmap_t pmap)
{
	return (pmap->pm_flags & PMAP_EMULATE_AD_BITS);
}

/* Return various clipped indexes for a given VA */

/*
 * Returns the index of a PTE in a PT, representing a terminal
 * page.
 */
static __inline vm_pindex_t
pmap_pte_index(vm_offset_t va)
{
	return ((va >> PAGE_SHIFT) & ((1ul << NPTEPGSHIFT) - 1));
}

/*
 * Returns the index of a PT in a PD
 */
static __inline vm_pindex_t
pmap_pde_index(vm_offset_t va)
{
	return ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1));
}

/*
 * Returns the index of a PD in a PDP
 */
static __inline vm_pindex_t
pmap_pdpe_index(vm_offset_t va)
{
	return ((va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1));
}

/*
 * Returns the index of a PDP in the PML4
 */
static __inline vm_pindex_t
pmap_pml4e_index(vm_offset_t va)
{
	return ((va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1));
}

#endif	/* _KERNEL */

#endif	/* !LOCORE */

#endif	/* !_MACHINE_PMAP_H_ */
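
/*
 * Usage sketch (illustrative only): the pmap_*_index() helpers decompose
 * a canonical VA and KVADDR() recomposes it, so for any page-aligned
 * kernel va the round trip is an identity:
 *
 *	KVADDR(pmap_pml4e_index(va), pmap_pdpe_index(va),
 *	       pmap_pde_index(va), pmap_pte_index(va)) == va
 */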