/*	$OpenBSD: pmap.h,v 1.25 2001/12/19 08:58:05 art Exp $	*/
/*	$NetBSD: pmap.h,v 1.44 2000/04/24 17:18:18 thorpej Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgment:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef _I386_PMAP_H_
#define _I386_PMAP_H_

#if defined(_KERNEL) && !defined(_LKM) && defined(__NetBSD__)
#include "opt_user_ldt.h"
#endif

#include <machine/cpufunc.h>
#include <machine/pte.h>
#include <machine/segments.h>
#include <uvm/uvm_pglist.h>
#include <uvm/uvm_object.h>

/*
 * see pte.h for a description of i386 MMU terminology and hardware
 * interface.
 *
 * a pmap describes a process's 4GB virtual address space.  this
 * virtual address space can be broken up into 1024 4MB regions which
 * are described by PDEs in the PDP.  the PDEs are defined as follows:
 *
 * (ranges are inclusive -> exclusive, just like vm_map_entry start/end)
 * (the following assumes that KERNBASE is 0xc0000000)
 *
 * PDE#s	VA range		usage
 * 0->767	0x0 -> 0xbfc00000	user address space, note that the
 *					max user address is 0xbfbfe000
 *					the final two pages in the last 4MB
 *					used to be reserved for the UAREA
 *					but now are no longer used
 * 767		0xbfc00000->		recursive mapping of PDP (used for
 *		0xc0000000		linear mapping of PTPs)
 * 768->1023	0xc0000000->		kernel address space (constant
 *		0xffc00000		across all pmaps/processes)
 * 1023		0xffc00000->		"alternate" recursive PDP mapping
 *		<end>			(for other pmaps)
 *
 *
 * note: a recursive PDP mapping provides a way to map all the PTEs for
 * a 4GB address space into a linear chunk of virtual memory.  in other
 * words, the PTE for page 0 is the first int mapped into the 4MB recursive
 * area.  the PTE for page 1 is the second int.  the very last int in the
 * 4MB range is the PTE that maps VA 0xfffff000 (the last page in a 4GB
 * address space).
 *
 * all pmaps' PDs must have the same values in slots 768->1023 so that
 * the kernel is always mapped in every process.  these values are loaded
 * into the PD at pmap creation time.
 *
 * at any one time only one pmap can be active on a processor.  this is
 * the pmap whose PDP is pointed to by processor register %cr3.  this pmap
 * will have all its PTEs mapped into memory at the recursive mapping
 * point (slot #767 as shown above).  when the pmap code wants to find the
 * PTE for a virtual address, all it has to do is the following:
 *
 * address of PTE = (767 * 4MB) + (VA / NBPG) * sizeof(pt_entry_t)
 *                = 0xbfc00000 + (VA / 4096) * 4
 *
 * what happens if the pmap layer is asked to perform an operation
 * on a pmap that is not the one which is currently active?  in that
 * case we take the PA of the PDP of the non-active pmap and put it in
 * slot 1023 of the active pmap.  this causes the non-active pmap's
 * PTEs to get mapped in the final 4MB of the 4GB address space
 * (e.g. starting at 0xffc00000).
 *
 * the following figure shows the effects of the recursive PDP mapping:
 *
 *   PDP (%cr3)
 *   +----+
 *   |   0| -> PTP#0 that maps VA 0x0 -> 0x400000
 *   |    |
 *   |    |
 *   | 767| -> points back to PDP (%cr3) mapping VA 0xbfc00000 -> 0xc0000000
 *   | 768| -> first kernel PTP (maps 0xc0000000 -> 0xc0400000)
 *   |    |
 *   |1023| -> points to alternate pmap's PDP (maps 0xffc00000 -> end)
 *   +----+
 *
 * note that the PDE#767 VA (0xbfc00000) is defined as "PTE_BASE"
 * note that the PDE#1023 VA (0xffc00000) is defined as "APTE_BASE"
 *
 * starting at VA 0xbfc00000 the current active PDP (%cr3) acts as a
 * PTP:
 *
 * PTP#767 == PDP(%cr3) => maps VA 0xbfc00000 -> 0xc0000000
 *   +----+
 *   |   0| -> maps the contents of PTP#0 at VA 0xbfc00000->0xbfc01000
 *   |    |
 *   |    |
 *   | 767| -> maps contents of PTP#767 (the PDP) at VA 0xbfeff000
 *   | 768| -> maps contents of first kernel PTP
 *   |    |
 *   |1023|
 *   +----+
 *
 * note that the mapping of the PDP at PTP#767's VA (0xbfeff000) is
 * defined as "PDP_BASE".... within that mapping there are two
 * defines:
 *   "PDP_PDE" (0xbfeffbfc) is the VA of the PDE in the PDP
 *      which points back to itself.
 *   "APDP_PDE" (0xbfeffffc) is the VA of the PDE in the PDP which
 *      establishes the recursive mapping of the alternate pmap.
 *      to set the alternate PDP, one just has to put the correct
 *      PA info in *APDP_PDE.
 *
 * note that in the APTE_BASE space, the APDP appears at VA
 * "APDP_BASE" (0xfffff000).
 */
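
/*
 * a worked example of the formula above (for illustration only; the
 * numbers assume KERNBASE == 0xc0000000 and 4KB pages):
 *
 *	VA = 0xc0001000 (the second page of kernel space)
 *
 *	address of PTE = 0xbfc00000 + (0xc0001000 / 4096) * 4
 *	               = 0xbfc00000 + 0xc0001 * 4
 *	               = 0xbfc00000 + 0x300004
 *	               = 0xbff00004
 *
 * this is exactly the address that the vtopte() macro (defined below)
 * computes.
 */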

/*
 * the following defines identify the slots used as described above.
 */

#define PDSLOT_PTE	((KERNBASE/NBPD)-1) /* 767: for recursive PDP map */
#define PDSLOT_KERN	(KERNBASE/NBPD)	    /* 768: start of kernel space */
#define PDSLOT_APTE	((unsigned)1023)    /* 1023: alternative recursive slot */

/*
 * the following defines give the virtual addresses of various MMU
 * data structures:
 * PTE_BASE and APTE_BASE: the base VA of the linear PTE mappings
 * PDP_BASE and APDP_BASE: the base VA of the recursive mapping of the PDP
 * PDP_PDE and APDP_PDE: the VA of the PDE that points back to the PDP/APDP
 */

#define PTE_BASE	((pt_entry_t *) (PDSLOT_PTE * NBPD) )
#define APTE_BASE	((pt_entry_t *) (PDSLOT_APTE * NBPD) )
#define PDP_BASE  ((pd_entry_t *)(((char *)PTE_BASE) + (PDSLOT_PTE * NBPG)))
#define APDP_BASE ((pd_entry_t *)(((char *)APTE_BASE) + (PDSLOT_APTE * NBPG)))
#define PDP_PDE		(PDP_BASE + PDSLOT_PTE)
#define APDP_PDE	(PDP_BASE + PDSLOT_APTE)

/*
 * XXXCDC: tmp xlate from old names:
 * PTDPTDI -> PDSLOT_PTE
 * KPTDI -> PDSLOT_KERN
 * APTDPTDI -> PDSLOT_APTE
 */

/*
 * the following define determines how many PTPs should be set up for the
 * kernel by locore.s at boot time.  this should be large enough to
 * get the VM system running.  once the VM system is running, the
 * pmap module can add more PTPs to the kernel area on demand.
 */

#ifndef NKPTP
#define NKPTP		4	/* 16MB to start */
#endif
#define NKPTP_MIN	4	/* smallest value we allow */
#define NKPTP_MAX	(1024 - (KERNBASE/NBPD) - 1)
				/* largest value (-1 for APTP space) */

/*
 * various address macros
 *
 *  vtopte: return a pointer to the PTE mapping a VA
 *  kvtopte: same as above (takes a KVA, but doesn't matter with this pmap)
 *  ptetov: given a pointer to a PTE, return the VA that it maps
 *  vtophys: translate a VA to the PA mapped to it
 *
 * plus alternative versions of the above
 */

#define vtopte(VA)	(PTE_BASE + i386_btop(VA))
#define kvtopte(VA)	vtopte(VA)
#define ptetov(PT)	(i386_ptob(PT - PTE_BASE))
#define vtophys(VA)	((*vtopte(VA) & PG_FRAME) | \
			 ((unsigned)(VA) & ~PG_FRAME))
#define avtopte(VA)	(APTE_BASE + i386_btop(VA))
#define ptetoav(PT)	(i386_ptob(PT - APTE_BASE))
#define avtophys(VA)	((*avtopte(VA) & PG_FRAME) | \
			 ((unsigned)(VA) & ~PG_FRAME))

/*
 * pdei/ptei: generate index into PDP/PTP from a VA
 */
#define pdei(VA)	(((VA) & PD_MASK) >> PDSHIFT)
#define ptei(VA)	(((VA) & PT_MASK) >> PGSHIFT)
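
/*
 * example (for illustration only, again assuming KERNBASE is
 * 0xc0000000): for VA 0xc0001234,
 *
 *	pdei(0xc0001234) == 768		(PDE slot: the first kernel PTP)
 *	ptei(0xc0001234) == 1		(PTE slot within that PTP)
 *	vtopte(0xc0001234) == (pt_entry_t *) 0xbff00004
 *	vtophys(0xc0001234) == (*vtopte(0xc0001234) & PG_FRAME) | 0x234
 *
 * i.e. vtophys() combines the page frame address stored in the PTE
 * with the offset of the VA within its page.
 */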

/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 *   a PTP's VA is the first VA mapped by that PTP
 *
 * note that NBPG == number of bytes in a PTP (4096 bytes == 1024 entries)
 *           NBPD == number of bytes a PTP can map (4MB)
 */

#define ptp_i2o(I)	((I) * NBPG)	/* index => offset */
#define ptp_o2i(O)	((O) / NBPG)	/* offset => index */
#define ptp_i2v(I)	((I) * NBPD)	/* index => VA */
#define ptp_v2i(V)	((V) / NBPD)	/* VA => index (same as pdei) */

/*
 * PG_AVAIL usage: we make use of the ignored bits of the PTE
 */

#define PG_W		PG_AVAIL1	/* "wired" mapping */
#define PG_PVLIST	PG_AVAIL2	/* mapping has entry on pvlist */
/* PG_AVAIL3 not used */

#ifdef _KERNEL
/*
 * pmap data structures: see pmap.c for details of locking.
 */

struct pmap;
typedef struct pmap *pmap_t;

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * the pmap structure
 *
 * note that the pm_obj contains the simple_lock, the reference count,
 * page list, and number of PTPs within the pmap.
 */

struct pmap {
	struct uvm_object pm_obj;	/* object (lck by object lock) */
#define pm_lock	pm_obj.vmobjlock
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
	u_int32_t pm_pdirpa;		/* PA of PD (read-only after create) */
	struct vm_page *pm_ptphint;	/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats; /* pmap stats (lck by object lock) */

	int pm_flags;			/* see below */

	union descriptor *pm_ldt;	/* user-set LDT */
	int pm_ldt_len;			/* number of LDT entries */
	int pm_ldt_sel;			/* LDT selector */
};

/* pm_flags */
#define PMF_USER_LDT	0x01	/* pmap has user-set LDT */

/*
 * for each managed physical page we maintain a list of <PMAP,VA>'s
 * which it is mapped at.  the list is headed by a pv_head structure.
 * there is one pv_head per managed phys page (allocated at boot time).
 * the pv_head structure points to a list of pv_entry structures (each
 * describes one mapping).
 */

struct pv_entry;

struct pv_head {
	struct simplelock pvh_lock;	/* locks every pv on this list */
	struct pv_entry *pvh_list;	/* head of list (locked by pvh_lock) */
};

struct pv_entry {			/* locked by its list's pvh_lock */
	struct pv_entry *pv_next;	/* next entry */
	struct pmap *pv_pmap;		/* the pmap */
	vaddr_t pv_va;			/* the virtual address */
	struct vm_page *pv_ptp;		/* the vm_page of the PTP */
};

/*
 * pv_entrys are dynamically allocated in chunks from a single page.
 * we keep track of how many pv_entrys are in use for each page and
 * we can free pv_entry pages if needed.  there is one lock for the
 * entire allocation system.
 */

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pvpi_list;
	struct pv_entry *pvpi_pvfree;
	int pvpi_nfree;
};

/*
 * number of pv_entry's in a pv_page
 * (note: won't work on systems where NBPG isn't a constant)
 */

#define PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \
			sizeof(struct pv_entry))

/*
 * a pv_page: where pv_entrys are allocated from
 */

struct pv_page {
	struct pv_page_info pvinfo;
	struct pv_entry pvents[PVE_PER_PVPAGE];
};
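
/*
 * example: with 4KB pages a pv_page holds (NBPG - sizeof(struct
 * pv_page_info)) / sizeof(struct pv_entry) entries; on i386 (16-byte
 * pv_page_info, 16-byte pv_entry) that is (4096 - 16) / 16 == 255.
 *
 * a sketch (for illustration only, not the pmap.c code itself) of
 * walking all recorded mappings of a managed page:
 *
 *	struct pv_entry *pve;
 *
 *	simple_lock(&pvh->pvh_lock);
 *	for (pve = pvh->pvh_list; pve != NULL; pve = pve->pv_next)
 *		[ operate on the <pve->pv_pmap, pve->pv_va> mapping ]
 *	simple_unlock(&pvh->pvh_lock);
 */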

/*
 * pmap_remove_record: a record of VAs that have been unmapped, used to
 * flush TLB.  if we have more than PMAP_RR_MAX then we stop recording.
 */

#define PMAP_RR_MAX	16	/* max of 16 pages (64K) */

struct pmap_remove_record {
	int prr_npages;
	vaddr_t prr_vas[PMAP_RR_MAX];
};
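
/*
 * a sketch (for illustration only, not the pmap.c code itself) of how
 * a remove record bounds TLB flushing: if recording stopped because
 * the record overflowed, flush everything; otherwise flush only the
 * recorded pages:
 *
 *	if (prr->prr_npages > PMAP_RR_MAX)
 *		tlbflush();		[ flush the entire TLB ]
 *	else
 *		for (i = 0; i < prr->prr_npages; i++)
 *			pmap_update_pg(prr->prr_vas[i]);
 */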

/*
 * pmap_transfer_location: used to pass the current location in the
 * pmap between pmap_transfer and pmap_transfer_ptes [e.g. during
 * a pmap_copy].
 */

struct pmap_transfer_location {
	vaddr_t addr;			/* the address (page-aligned) */
	pt_entry_t *pte;		/* the PTE that maps address */
	struct vm_page *ptp;		/* the PTP that the PTE lives in */
};

/*
 * global kernel variables
 */

extern pd_entry_t	PTD[];

/* PTDpaddr: is the physical address of the kernel's PDP */
extern u_long PTDpaddr;

extern struct pmap kernel_pmap_store;	/* kernel pmap */
extern int nkpde;			/* current # of PDEs for kernel */
extern int pmap_pg_g;			/* do we support PG_G? */

/*
 * macros
 */

#define pmap_kernel()			(&kernel_pmap_store)
#define pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define pmap_update(pm)			/* nada */

#define pmap_clear_modify(pg)		pmap_change_attrs(pg, 0, PG_M)
#define pmap_clear_reference(pg)	pmap_change_attrs(pg, 0, PG_U)
#define pmap_copy(DP,SP,D,L,S)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#define pmap_phys_address(ppn)		i386_ptob(ppn)
#define pmap_valid_entry(E)		((E) & PG_V) /* is PDE or PTE valid? */


/*
 * prototypes
 */

void		pmap_bootstrap __P((vaddr_t));
boolean_t	pmap_change_attrs __P((struct vm_page *, int, int));
static void	pmap_page_protect __P((struct vm_page *, vm_prot_t));
void		pmap_page_remove __P((struct vm_page *));
static void	pmap_protect __P((struct pmap *, vaddr_t,
				vaddr_t, vm_prot_t));
void		pmap_remove __P((struct pmap *, vaddr_t, vaddr_t));
boolean_t	pmap_test_attrs __P((struct vm_page *, int));
void		pmap_transfer __P((struct pmap *, struct pmap *, vaddr_t,
				   vsize_t, vaddr_t, boolean_t));
static void	pmap_update_pg __P((vaddr_t));
static void	pmap_update_2pg __P((vaddr_t,vaddr_t));
void		pmap_write_protect __P((struct pmap *, vaddr_t,
				vaddr_t, vm_prot_t));

vaddr_t reserve_dumppages __P((vaddr_t)); /* XXX: not a pmap fn */

#define PMAP_GROWKERNEL	/* turn on pmap_growkernel interface */

/*
 * Do idle page zero'ing uncached to avoid polluting the cache.
 */
boolean_t	pmap_zero_page_uncached __P((paddr_t));
#define PMAP_PAGEIDLEZERO(pa)	pmap_zero_page_uncached((pa))

/*
 * inline functions
 */

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 * if hardware doesn't support one-page flushing)
 */

__inline static void
pmap_update_pg(va)
	vaddr_t va;
{
#if defined(I386_CPU)
	if (cpu_class == CPUCLASS_386)
		tlbflush();
	else
#endif
		invlpg((u_int) va);
}

/*
 * pmap_update_2pg: flush two pages from the TLB
 */

__inline static void
pmap_update_2pg(va, vb)
	vaddr_t va, vb;
{
#if defined(I386_CPU)
	if (cpu_class == CPUCLASS_386)
		tlbflush();
	else
#endif
	{
		invlpg((u_int) va);
		invlpg((u_int) vb);
	}
}
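
/*
 * usage sketch (for illustration only): after modifying a live PTE
 * the stale TLB entry for that page must be flushed, e.g.
 *
 *	pt_entry_t *pte = kvtopte(va);
 *
 *	*pte &= ~PG_RW;		[ make the mapping read-only ]
 *	pmap_update_pg(va);	[ flush the old TLB entry ]
 *
 * on a 386 (which lacks the invlpg instruction) both functions fall
 * back to a full tlbflush().
 */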

/*
 * pmap_page_protect: change the protection of all recorded mappings
 * of a managed page
 *
 * => this function is a frontend for pmap_page_remove/pmap_change_attrs
 * => we only have to worry about making the page more protected.
 *    unprotecting a page is done on-demand at fault time.
 */

__inline static void
pmap_page_protect(pg, prot)
	struct vm_page *pg;
	vm_prot_t prot;
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_change_attrs(pg, PG_RO, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only have to worry about making the page more protected.
 *    unprotecting a page is done on-demand at fault time.
 */

__inline static void
pmap_protect(pmap, sva, eva, prot)
	struct pmap *pmap;
	vaddr_t sva, eva;
	vm_prot_t prot;
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}

#if defined(USER_LDT)
void	pmap_ldt_cleanup __P((struct proc *));
#define PMAP_FORK
#endif /* USER_LDT */

#endif /* _KERNEL */
#endif /* _I386_PMAP_H_ */