/*	$NetBSD: pmap.h,v 1.66 2002/11/02 07:07:09 perry Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgment:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef _I386_PMAP_H_
#define _I386_PMAP_H_

#if defined(_KERNEL_OPT)
#include "opt_user_ldt.h"
#include "opt_largepages.h"
#endif

#include <machine/cpufunc.h>
#include <machine/pte.h>
#include <machine/segments.h>
#include <uvm/uvm_object.h>

/*
 * see pte.h for a description of i386 MMU terminology and hardware
 * interface.
 *
 * a pmap describes a process's 4GB virtual address space.  this
 * virtual address space can be broken up into 1024 4MB regions which
 * are described by PDEs in the PDP.  the PDEs are defined as follows:
 *
 * (ranges are inclusive -> exclusive, just like vm_map_entry start/end)
 * (the following assumes that KERNBASE is 0xc0000000)
 *
 * PDE#s	VA range		usage
 * 0->766	0x0 -> 0xbfc00000	user address space, note that the
 *					max user address is 0xbfbfe000
 *					the final two pages in the last 4MB
 *					used to be reserved for the UAREA
 *					but now are no longer used
 * 767		0xbfc00000->		recursive mapping of PDP (used for
 *		0xc0000000		linear mapping of PTPs)
 * 768->1023	0xc0000000->		kernel address space (constant
 *		0xffc00000		across all pmap's/processes)
 * 1023		0xffc00000->		"alternate" recursive PDP mapping
 *		<end>			(for other pmaps)
 *
 *
 * note: a recursive PDP mapping provides a way to map all the PTEs for
 * a 4GB address space into a linear chunk of virtual memory.  in other
 * words, the PTE for page 0 is the first int mapped into the 4MB recursive
 * area.  the PTE for page 1 is the second int.  the very last int in the
 * 4MB range is the PTE that maps VA 0xfffff000 (the last page in a 4GB
 * address space).
 *
 * all pmap's PD's must have the same values in slots 768->1023 so that
 * the kernel is always mapped in every process.  these values are loaded
 * into the PD at pmap creation time.
 *
 * at any one time only one pmap can be active on a processor.  this is
 * the pmap whose PDP is pointed to by processor register %cr3.  this pmap
 * will have all its PTEs mapped into memory at the recursive mapping
 * point (slot #767 as shown above).  when the pmap code wants to find the
 * PTE for a virtual address, all it has to do is the following:
 *
 * address of PTE = (767 * 4MB) + (VA / NBPG) * sizeof(pt_entry_t)
 *                = 0xbfc00000 + (VA / 4096) * 4
 *
 * what happens if the pmap layer is asked to perform an operation
 * on a pmap that is not the one which is currently active?  in that
 * case we take the PA of the PDP of the non-active pmap and put it in
 * slot 1023 of the active pmap.  this causes the non-active pmap's
 * PTEs to get mapped in the final 4MB of the 4GB address space
 * (e.g. starting at 0xffc00000).
 *
 * the following figure shows the effects of the recursive PDP mapping:
 *
 *   PDP (%cr3)
 *   +----+
 *   |   0| -> PTP#0 that maps VA 0x0 -> 0x400000
 *   |    |
 *   |    |
 *   | 767| -> points back to PDP (%cr3) mapping VA 0xbfc00000 -> 0xc0000000
 *   | 768| -> first kernel PTP (maps 0xc0000000 -> 0xc0400000)
 *   |    |
 *   |1023| -> points to alternate pmap's PDP (maps 0xffc00000 -> end)
 *   +----+
 *
 * note that the PDE#767 VA (0xbfc00000) is defined as "PTE_BASE"
 * note that the PDE#1023 VA (0xffc00000) is defined as "APTE_BASE"
 *
 * starting at VA 0xbfc00000 the current active PDP (%cr3) acts as a
 * PTP:
 *
 * PTP#767 == PDP(%cr3) => maps VA 0xbfc00000 -> 0xc0000000
 *   +----+
 *   |   0| -> maps the contents of PTP#0 at VA 0xbfc00000->0xbfc01000
 *   |    |
 *   |    |
 *   | 767| -> maps contents of PTP#767 (the PDP) at VA 0xbfeff000
 *   | 768| -> maps contents of first kernel PTP
 *   |    |
 *   |1023|
 *   +----+
 *
 * note that the mapping of the PDP at PTP#767's VA (0xbfeff000) is
 * defined as "PDP_BASE".... within that mapping there are two
 * defines:
 *   "PDP_PDE" (0xbfeffbfc) is the VA of the PDE in the PDP
 *	which points back to itself.
 *   "APDP_PDE" (0xbfeffffc) is the VA of the PDE in the PDP which
 *	establishes the recursive mapping of the alternate pmap.
 *	to set the alternate PDP, one just has to put the correct
 *	PA info in *APDP_PDE.
 *
 * note that in the APTE_BASE space, the APDP appears at VA
 * "APDP_BASE" (0xfffff000).
 */
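/*
 * worked example of the PTE-address formula above (illustrative only,
 * not part of the interface; assumes KERNBASE == 0xc0000000 and
 * NBPG == 4096):
 *
 *	for VA 0x00002000 (user page #2):
 *	address of PTE = 0xbfc00000 + (0x2000 / 4096) * 4
 *	               = 0xbfc00000 + 2 * 4
 *	               = 0xbfc00008
 */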
/* XXX MP should we allocate one APDP_PDE per processor?? */

/*
 * the following defines identify the slots used as described above.
 */

#define PDSLOT_PTE	((KERNBASE/NBPD)-1) /* 767: for recursive PDP map */
#define PDSLOT_KERN	(KERNBASE/NBPD)	    /* 768: start of kernel space */
#define PDSLOT_APTE	((unsigned)1023) /* 1023: alternative recursive slot */

/*
 * the following defines give the virtual addresses of various MMU
 * data structures:
 * PTE_BASE and APTE_BASE: the base VA of the linear PTE mappings
 * PDP_BASE and APDP_BASE: the base VA of the recursive mapping of the PDP
 * PDP_PDE and APDP_PDE: the VA of the PDE that points back to the PDP/APDP
 */

#define PTE_BASE	((pt_entry_t *) (PDSLOT_PTE * NBPD) )
#define APTE_BASE	((pt_entry_t *) (PDSLOT_APTE * NBPD) )
#define PDP_BASE ((pd_entry_t *)(((char *)PTE_BASE) + (PDSLOT_PTE * NBPG)))
#define APDP_BASE ((pd_entry_t *)(((char *)APTE_BASE) + (PDSLOT_APTE * NBPG)))
#define PDP_PDE		(PDP_BASE + PDSLOT_PTE)
#define APDP_PDE	(PDP_BASE + PDSLOT_APTE)
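/*
 * sanity-check arithmetic for the defines above (illustrative; assumes
 * KERNBASE == 0xc0000000, NBPD == 4MB, NBPG == 4096, 4-byte entries):
 *
 *	PTE_BASE = 767 * 0x400000             = 0xbfc00000
 *	PDP_BASE = 0xbfc00000 + 767 * 0x1000  = 0xbfeff000
 *	PDP_PDE  = 0xbfeff000 + 767 * 4       = 0xbfeffbfc
 *	APDP_PDE = 0xbfeff000 + 1023 * 4      = 0xbfeffffc
 *
 * these match the VAs quoted in the big comment above.
 */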
/*
 * the following define determines how many PTPs should be set up for the
 * kernel by locore.s at boot time.  this should be large enough to
 * get the VM system running.  once the VM system is running, the
 * pmap module can add more PTPs to the kernel area on demand.
 */

#ifndef NKPTP
#define NKPTP		4	/* 16MB to start */
#endif
#define NKPTP_MIN	4	/* smallest value we allow */
#define NKPTP_MAX	(1024 - (KERNBASE/NBPD) - 1)
				/* largest value (-1 for APTP space) */

/*
 * pdei/ptei: generate index into PDP/PTP from a VA
 */
#define pdei(VA)	(((VA) & PD_MASK) >> PDSHIFT)
#define ptei(VA)	(((VA) & PT_MASK) >> PGSHIFT)

/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 *   a PTP's VA is the first VA mapped by that PTP
 *
 * note that NBPG == number of bytes in a PTP (4096 bytes == 1024 entries)
 *           NBPD == number of bytes a PTP can map (4MB)
 */

#define ptp_i2o(I)	((I) * NBPG)	/* index => offset */
#define ptp_o2i(O)	((O) / NBPG)	/* offset => index */
#define ptp_i2v(I)	((I) * NBPD)	/* index => VA */
#define ptp_v2i(V)	((V) / NBPD)	/* VA => index (same as pdei) */
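/*
 * worked example of the index macros above (illustrative; assumes
 * KERNBASE == 0xc0000000, PDSHIFT == 22, PGSHIFT == 12):
 *
 *	for VA 0xc0001000 (second page of kernel space):
 *	pdei(0xc0001000) = 0xc0001000 >> 22                = 768
 *	ptei(0xc0001000) = (0xc0001000 & PT_MASK) >> 12    = 1
 *	ptp_i2v(768)     = 768 * 0x400000                  = 0xc0000000
 *	ptp_i2o(768)     = 768 * 0x1000                    = 0x300000
 *	    (i.e. PTP#768's PTEs live at PTE_BASE + 0x300000 = 0xbff00000)
 */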
/*
 * PG_AVAIL usage: we make use of the ignored bits of the PTE
 */

#define PG_W		PG_AVAIL1	/* "wired" mapping */
#define PG_PVLIST	PG_AVAIL2	/* mapping has entry on pvlist */
/* PG_AVAIL3 not used */

/*
 * Number of PTE's per cache line.  4 byte pte, 32-byte cache line
 * Used to avoid false sharing of cache lines.
 */
#define NPTECL		8

#ifdef _KERNEL
/*
 * pmap data structures: see pmap.c for details of locking.
 */

struct pmap;
typedef struct pmap *pmap_t;

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * the pmap structure
 *
 * note that the pm_obj contains the simple_lock, the reference count,
 * page list, and number of PTPs within the pmap.
 *
 * XXX If we ever support processor numbers higher than 31, we'll have
 * XXX to rethink the CPU mask.
 */

struct pmap {
	struct uvm_object pm_obj;	/* object (lck by object lock) */
#define	pm_lock	pm_obj.vmobjlock
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
	u_int32_t pm_pdirpa;		/* PA of PD (read-only after create) */
	struct vm_page *pm_ptphint;	/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats; /* pmap stats (lck by object lock) */

	int pm_flags;			/* see below */

	union descriptor *pm_ldt;	/* user-set LDT */
	int pm_ldt_len;			/* number of LDT entries */
	int pm_ldt_sel;			/* LDT selector */
	u_int32_t pm_cpus;		/* mask of CPUs using pmap */
};

/* pm_flags */
#define	PMF_USER_LDT	0x01	/* pmap has user-set LDT */

/*
 * for each managed physical page we maintain a list of <PMAP,VA>'s
 * which it is mapped at.  the list is headed by a pv_head structure.
 * there is one pv_head per managed phys page (allocated at boot time).
 * the pv_head structure points to a list of pv_entry structures (each
 * describes one mapping).
 */

struct pv_entry;

struct pv_head {
	struct simplelock pvh_lock;	/* locks every pv on this list */
	struct pv_entry *pvh_list;	/* head of list (locked by pvh_lock) */
};

struct pv_entry {			/* locked by its list's pvh_lock */
	struct pv_entry *pv_next;	/* next entry */
	struct pmap *pv_pmap;		/* the pmap */
	vaddr_t pv_va;			/* the virtual address */
	struct vm_page *pv_ptp;		/* the vm_page of the PTP */
};

/*
 * pv_entrys are dynamically allocated in chunks from a single page.
 * we keep track of how many pv_entrys are in use for each page and
 * we can free pv_entry pages if needed.  there is one lock for the
 * entire allocation system.
 */

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pvpi_list;
	struct pv_entry *pvpi_pvfree;
	int pvpi_nfree;
};

/*
 * number of pv_entry's in a pv_page
 * (note: won't work on systems where NBPG isn't a constant)
 */

#define PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \
			sizeof(struct pv_entry))

/*
 * a pv_page: where pv_entrys are allocated from
 */

struct pv_page {
	struct pv_page_info pvinfo;
	struct pv_entry pvents[PVE_PER_PVPAGE];
};
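/*
 * worked example for PVE_PER_PVPAGE (illustrative; assumes i386 with
 * 4-byte pointers, NBPG == 4096, and no struct padding):
 *
 *	sizeof(struct pv_page_info) = 8 (TAILQ_ENTRY) + 4 + 4 = 16
 *	sizeof(struct pv_entry)     = 4 + 4 + 4 + 4           = 16
 *	PVE_PER_PVPAGE = (4096 - 16) / 16 = 255
 *
 * so under those assumptions each pv_page holds 255 pv_entry structures.
 */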
/*
 * global kernel variables
 */

/* PTDpaddr: is the physical address of the kernel's PDP */
extern u_long PTDpaddr;

extern struct pmap kernel_pmap_store;	/* kernel pmap */
extern int nkpde;			/* current # of PDEs for kernel */
extern int pmap_pg_g;			/* do we support PG_G? */

/*
 * macros
 */

#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
#define	pmap_update(pmap)		/* nothing (yet) */

#define pmap_clear_modify(pg)		pmap_clear_attrs(pg, PG_M)
#define pmap_clear_reference(pg)	pmap_clear_attrs(pg, PG_U)
#define pmap_copy(DP,SP,D,L,S)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#define pmap_move(DP,SP,D,L,S)
#define pmap_phys_address(ppn)		i386_ptob(ppn)
#define pmap_valid_entry(E)		((E) & PG_V) /* is PDE or PTE valid? */


/*
 * prototypes
 */

void		pmap_activate __P((struct proc *));
void		pmap_bootstrap __P((vaddr_t));
boolean_t	pmap_clear_attrs __P((struct vm_page *, int));
void		pmap_deactivate __P((struct proc *));
void		pmap_page_remove __P((struct vm_page *));
void		pmap_remove __P((struct pmap *, vaddr_t, vaddr_t));
boolean_t	pmap_test_attrs __P((struct vm_page *, int));
void		pmap_write_protect __P((struct pmap *, vaddr_t,
		    vaddr_t, vm_prot_t));

vaddr_t		reserve_dumppages __P((vaddr_t)); /* XXX: not a pmap fn */

void	pmap_tlb_shootdown __P((pmap_t, vaddr_t, pt_entry_t, int32_t *));
void	pmap_tlb_shootnow __P((int32_t));
void	pmap_do_tlb_shootdown __P((struct cpu_info *));

#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/*
 * Do idle page zero'ing uncached to avoid polluting the cache.
 */
boolean_t	pmap_pageidlezero __P((paddr_t));
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * inline functions
 */

/*ARGSUSED*/
static __inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 *	if hardware doesn't support one-page flushing)
 */

__inline static void __attribute__((__unused__))
pmap_update_pg(vaddr_t va)
{
#if defined(I386_CPU)
	if (cpu_class == CPUCLASS_386)
		tlbflush();
	else
#endif
		invlpg((u_int) va);
}

/*
 * pmap_update_2pg: flush two pages from the TLB
 */

__inline static void __attribute__((__unused__))
pmap_update_2pg(vaddr_t va, vaddr_t vb)
{
#if defined(I386_CPU)
	if (cpu_class == CPUCLASS_386)
		tlbflush();
	else
#endif
	{
		invlpg((u_int) va);
		invlpg((u_int) vb);
	}
}

/*
 * pmap_page_protect: change the protection of all recorded mappings
 *	of a managed page
 *
 * => this function is a frontend for pmap_page_remove/pmap_clear_attrs
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __attribute__((__unused__))
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_clear_attrs(pg, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}
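/*
 * usage sketch for pmap_page_protect (illustrative only, never compiled;
 * the behavior follows directly from the code above):
 */
#if 0
	/* revoke write access to a page that is being cleaned: */
	pmap_page_protect(pg, VM_PROT_READ);	/* clears PG_RW everywhere */

	/* about to free a page: drop every mapping of it: */
	pmap_page_protect(pg, VM_PROT_NONE);

	/* VM_PROT_WRITE set => no-op; unprotecting is left to the fault
	 * handler, as the comment above notes: */
	pmap_page_protect(pg, VM_PROT_ALL);
#endif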
/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __attribute__((__unused__))
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}

/*
 * various address inlines
 *
 *  vtopte: return a pointer to the PTE mapping a VA, works only for
 *	user and PT addresses
 *
 *  kvtopte: return a pointer to the PTE mapping a kernel VA
 */

#include <lib/libkern/libkern.h>

static __inline pt_entry_t * __attribute__((__unused__))
vtopte(vaddr_t va)
{

	KASSERT(va < (PDSLOT_KERN << PDSHIFT));

	return (PTE_BASE + i386_btop(va));
}

static __inline pt_entry_t * __attribute__((__unused__))
kvtopte(vaddr_t va)
{

	KASSERT(va >= (PDSLOT_KERN << PDSHIFT));

#ifdef LARGEPAGES
	{
		pd_entry_t *pde;

		pde = PDP_BASE + pdei(va);
		if (*pde & PG_PS)
			return ((pt_entry_t *)pde);
	}
#endif

	return (PTE_BASE + i386_btop(va));
}

paddr_t	vtophys __P((vaddr_t));
vaddr_t	pmap_map __P((vaddr_t, paddr_t, paddr_t, vm_prot_t));

#if defined(USER_LDT)
void	pmap_ldt_cleanup __P((struct proc *));
#define	PMAP_FORK
#endif /* USER_LDT */

#endif /* _KERNEL */
#endif /* _I386_PMAP_H_ */