/*	$OpenBSD: pmap.h,v 1.33 2010/05/13 19:27:24 oga Exp $	*/
/*	$NetBSD: pmap.h,v 1.1 2003/04/26 18:39:46 fvdl Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgment:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef _AMD64_PMAP_H_
#define _AMD64_PMAP_H_

#ifndef _LOCORE
#include <machine/cpufunc.h>
#include <machine/pte.h>
#include <machine/segments.h>
#include <uvm/uvm_object.h>
#endif

/*
 * The x86_64 pmap module closely resembles the i386 one.  It uses
 * the same recursive entry scheme and the same alternate area
 * trick for accessing non-current pmaps; see the i386 pmap.h
 * for a description.  The obvious difference is that 3 extra
 * levels of page table need to be dealt with.  The level 1 page
 * table pages are at:
 *
 * l1: 0x00007f8000000000 - 0x00007fffffffffff	(39 bits, needs PML4 entry)
 *
 * The alternate space (L4_SLOT_APTE, slot 510) is at:
 *
 * l1: 0xffffff0000000000 - 0xffffff7fffffffff	(39 bits, needs PML4 entry)
 *
 * The rest is kept as physical pages in 3 UVM objects and is
 * temporarily mapped for virtual access when needed.
 *
 * Note that address space is signed, so the layout for 48 bits is:
 *
 *  +---------------------------------+ 0xffffffffffffffff
 *  |                                 |
 *  |    Kernel Space (incl. kernel   |
 *  |      image at slot 511)         |
 *  |                                 |
 *  +---------------------------------+ 0xffffff8000000000
 *  |    alt.L1 table (PTE pages)     |
 *  +---------------------------------+ 0xffffff0000000000
 *  ~                                 ~
 *  |                                 |
 *  |          Kernel Space           |
 *  |                                 |
 *  +---------------------------------+ 0xffff800000000000 = 0x0000800000000000
 *  |                                 |
 *  |      L1 table (PTE pages)       |
 *  |                                 |
 *  +---------------------------------+ 0x00007f8000000000
 *  ~                                 ~
 *  |                                 |
 *  |           User Space            |
 *  |                                 |
 *  +---------------------------------+ 0x0000000000000000
 *
 * In other words, there is a 'VA hole' at 0x0000800000000000 -
 * 0xffff800000000000 which will trap, just as on, for example,
 * sparcv9.
 *
 * The unused space can be used if needed, but it adds a little more
 * complexity to the calculations.
 */

/*
 * The first generation of Hammer processors can use 48 bits of
 * virtual memory and 40 bits of physical memory.  This will be
 * more for later generations.  These defines can be changed to
 * variable names containing the # of bits, extracted from an
 * extended cpuid instruction (variables are harder to use during
 * bootstrap, though).
 */
#define VIRT_BITS	48
#define PHYS_BITS	40

/*
 * Mask to get rid of the sign-extended part of addresses.
 */
#define VA_SIGN_MASK		0xffff000000000000
#define VA_SIGN_NEG(va)		((va) | VA_SIGN_MASK)
/*
 * XXXfvdl this one's not right.
 */
#define VA_SIGN_POS(va)		((va) & ~VA_SIGN_MASK)
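
/*
 * Example (editor's illustration, not from the original source): the
 * two macros above round-trip a canonical kernel address, and
 * VA_SIGN_POS() keeps bit 47, so per-level indices computed from the
 * result still land in the high PML4 slots:
 *
 *	VA_SIGN_POS(0xffffff8000000000) == 0x0000ff8000000000
 *	VA_SIGN_NEG(0x0000ff8000000000) == 0xffffff8000000000
 *
 * pl4_i() (defined below) applied to 0xffffff8000000000 thus yields
 * slot 511, i.e. L4_SLOT_KERNBASE.
 */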

#define L4_SLOT_PTE		255
#define L4_SLOT_KERN		256
#define L4_SLOT_KERNBASE	511
#define L4_SLOT_APTE		510
#define L4_SLOT_DIRECT		509

#define PDIR_SLOT_KERN		L4_SLOT_KERN
#define PDIR_SLOT_PTE		L4_SLOT_PTE
#define PDIR_SLOT_APTE		L4_SLOT_APTE
#define PDIR_SLOT_DIRECT	L4_SLOT_DIRECT

/*
 * The following defines give the virtual addresses of various MMU
 * data structures:
 * PTE_BASE and APTE_BASE: the base VA of the linear PTE mappings
 * PTD_BASE and APTD_BASE: the base VA of the recursive mapping of the PTD
 * PDP_PDE and APDP_PDE: the VA of the PDE that points back to the PDP/APDP
 */

#define PTE_BASE	((pt_entry_t *)(L4_SLOT_PTE * NBPD_L4))
#define APTE_BASE	((pt_entry_t *)(VA_SIGN_NEG((L4_SLOT_APTE * NBPD_L4))))
#define PMAP_DIRECT_BASE	(VA_SIGN_NEG((L4_SLOT_DIRECT * NBPD_L4)))
#define PMAP_DIRECT_END		(VA_SIGN_NEG(((L4_SLOT_DIRECT + 1) * NBPD_L4)))

#define L1_BASE		PTE_BASE
#define AL1_BASE	APTE_BASE

#define L2_BASE	((pd_entry_t *)((char *)L1_BASE + L4_SLOT_PTE * NBPD_L3))
#define L3_BASE	((pd_entry_t *)((char *)L2_BASE + L4_SLOT_PTE * NBPD_L2))
#define L4_BASE	((pd_entry_t *)((char *)L3_BASE + L4_SLOT_PTE * NBPD_L1))

#define AL2_BASE ((pd_entry_t *)((char *)AL1_BASE + L4_SLOT_PTE * NBPD_L3))
#define AL3_BASE ((pd_entry_t *)((char *)AL2_BASE + L4_SLOT_PTE * NBPD_L2))
#define AL4_BASE ((pd_entry_t *)((char *)AL3_BASE + L4_SLOT_PTE * NBPD_L1))

#define PDP_PDE		(L4_BASE + PDIR_SLOT_PTE)
#define APDP_PDE	(L4_BASE + PDIR_SLOT_APTE)

#define PDP_BASE	L4_BASE
#define APDP_BASE	AL4_BASE
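
/*
 * Worked example (editor's illustration): with the recursive entry in
 * PML4 slot 255 (L4_SLOT_PTE), each *_BASE above re-applies slot 255
 * one level further down:
 *
 *	PTE_BASE = 255 * NBPD_L4            = 0x00007f8000000000
 *	L2_BASE  = PTE_BASE + 255 * NBPD_L3 = 0x00007fbfc0000000
 *	L3_BASE  = L2_BASE  + 255 * NBPD_L2 = 0x00007fbfdfe00000
 *	L4_BASE  = L3_BASE  + 255 * NBPD_L1 = 0x00007fbfdfeff000
 *
 * so the PML4 page itself is visible as ordinary memory at L4_BASE.
 */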

#define NKL4_MAX_ENTRIES	(unsigned long)1
#define NKL3_MAX_ENTRIES	(unsigned long)(NKL4_MAX_ENTRIES * 512)
#define NKL2_MAX_ENTRIES	(unsigned long)(NKL3_MAX_ENTRIES * 512)
#define NKL1_MAX_ENTRIES	(unsigned long)(NKL2_MAX_ENTRIES * 512)

#define NKL4_KIMG_ENTRIES	1
#define NKL3_KIMG_ENTRIES	1
#define NKL2_KIMG_ENTRIES	8

#define NDML4_ENTRIES		1
#define NDML3_ENTRIES		1
#define NDML2_ENTRIES		4	/* 4GB */

/*
 * Since kva space is below the kernel in its entirety, we start off
 * with zero entries on each level.
 */
#define NKL4_START_ENTRIES	0
#define NKL3_START_ENTRIES	0
#define NKL2_START_ENTRIES	0
#define NKL1_START_ENTRIES	0	/* XXX */

#define NTOPLEVEL_PDES		(PAGE_SIZE / (sizeof (pd_entry_t)))

#define KERNSPACE		(NKL4_ENTRIES * NBPD_L4)

#define NPDPG			(PAGE_SIZE / sizeof (pd_entry_t))

/*
 * pl*_pi: index in the ptp page for a pde mapping a VA.
 * (pl*_i below is the index in the virtual array of all pdes per level)
 */
#define pl1_pi(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)
#define pl2_pi(VA)	(((VA_SIGN_POS(VA)) & L2_MASK) >> L2_SHIFT)
#define pl3_pi(VA)	(((VA_SIGN_POS(VA)) & L3_MASK) >> L3_SHIFT)
#define pl4_pi(VA)	(((VA_SIGN_POS(VA)) & L4_MASK) >> L4_SHIFT)

/*
 * pl*_i: generate index into pde/pte arrays in virtual space
 */
#define pl1_i(VA)	(((VA_SIGN_POS(VA)) & L1_FRAME) >> L1_SHIFT)
#define pl2_i(VA)	(((VA_SIGN_POS(VA)) & L2_FRAME) >> L2_SHIFT)
#define pl3_i(VA)	(((VA_SIGN_POS(VA)) & L3_FRAME) >> L3_SHIFT)
#define pl4_i(VA)	(((VA_SIGN_POS(VA)) & L4_FRAME) >> L4_SHIFT)
#define pl_i(va, lvl) \
	(((VA_SIGN_POS(va)) & ptp_masks[(lvl)-1]) >> ptp_shifts[(lvl)-1])

#define PTP_MASK_INITIALIZER	{ L1_FRAME, L2_FRAME, L3_FRAME, L4_FRAME }
#define PTP_SHIFT_INITIALIZER	{ L1_SHIFT, L2_SHIFT, L3_SHIFT, L4_SHIFT }
#define NKPTP_INITIALIZER	{ NKL1_START_ENTRIES, NKL2_START_ENTRIES, \
				  NKL3_START_ENTRIES, NKL4_START_ENTRIES }
#define NKPTPMAX_INITIALIZER	{ NKL1_MAX_ENTRIES, NKL2_MAX_ENTRIES, \
				  NKL3_MAX_ENTRIES, NKL4_MAX_ENTRIES }
#define NBPD_INITIALIZER	{ NBPD_L1, NBPD_L2, NBPD_L3, NBPD_L4 }
#define PDES_INITIALIZER	{ L2_BASE, L3_BASE, L4_BASE }
#define APDES_INITIALIZER	{ AL2_BASE, AL3_BASE, AL4_BASE }

/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 *   a PTP's VA is the first VA mapped by that PTP
 *
 * Note that PAGE_SIZE == number of bytes in a PTP (4096 bytes == 512
 * entries of 8 bytes each) and NBPD_L1 == number of bytes a PTP can map
 * at level 1 (2MB).
 */

#define ptp_va2o(va, lvl)	(pl_i(va, (lvl)+1) * PAGE_SIZE)

#define PTP_LEVELS	4

/*
 * PG_AVAIL usage: we make use of the ignored bits of the PTE
 */

#define PG_W		PG_AVAIL1	/* "wired" mapping */
#define PG_PVLIST	PG_AVAIL2	/* mapping has entry on pvlist */
/* PG_AVAIL3 not used */

/*
 * Number of PTEs per cache line.  8 byte PTE, 64-byte cache line.
 * Used to avoid false sharing of cache lines.
 */
#define NPTECL		8


#if defined(_KERNEL) && !defined(_LOCORE)
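
#if 0
/*
 * Editor's sketch (hypothetical, not part of the pmap API): how the
 * recursive mapping and the pl*_i macros above combine to walk all
 * four paging levels for a VA.  Real code should use the vtopte()/
 * kvtopte() inlines defined further down.
 */
static __inline pt_entry_t
pmap_walk_example(vaddr_t va)
{
	/* each *_BASE array is indexed by the global per-level index */
	if ((L4_BASE[pl4_i(va)] & PG_V) == 0 ||
	    (L3_BASE[pl3_i(va)] & PG_V) == 0 ||
	    (L2_BASE[pl2_i(va)] & PG_V) == 0)
		return (0);
	return (L1_BASE[pl1_i(va)]);	/* the PTE itself */
}
#endif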

/*
 * pmap data structures: see pmap.c for details of locking.
 */

struct pmap;
typedef struct pmap *pmap_t;

/*
 * We maintain a list of all non-kernel pmaps.
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * The pmap structure
 *
 * Note that the pm_obj contains the simple_lock, the reference count,
 * page list, and number of PTPs within the pmap.
 *
 * pm_lock is the same as the spinlock for vm object 0.  Changes to
 * the other objects may only be made if that lock has been taken
 * (the other object locks are only used when uvm_pagealloc is called).
 */

struct pmap {
	struct uvm_object pm_obj[PTP_LEVELS-1];	/* objects for lvl >= 1 */
#define	pm_lock pm_obj[0].vmobjlock
#define pm_obj_l1 pm_obj[0]
#define pm_obj_l2 pm_obj[1]
#define pm_obj_l3 pm_obj[2]
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
	paddr_t pm_pdirpa;		/* PA of PD (read-only after create) */
	struct vm_page *pm_ptphint[PTP_LEVELS-1];
					/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats;  /* pmap stats (lck by object lock) */

	int pm_flags;			/* see below */

	union descriptor *pm_ldt;	/* user-set LDT */
	int pm_ldt_len;			/* number of LDT entries */
	int pm_ldt_sel;			/* LDT selector */
	u_int32_t pm_cpus;		/* mask of CPUs using pmap */
};

/*
 * MD flags that we use for pmap_enter (in the pa):
 */
#define PMAP_PA_MASK	~((paddr_t)PAGE_MASK)	/* to remove the flags */
#define PMAP_NOCACHE	0x1			/* set the non-cacheable bit */
#define PMAP_WC		0x2			/* set page write combining */
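
/*
 * Example (editor's illustration, not code from this file): these bits
 * travel in the low bits of the physical address handed to pmap_enter(),
 * e.g. to map a framebuffer page write-combining:
 *
 *	pmap_enter(pmap_kernel(), va, pa | PMAP_WC,
 *	    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
 */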

/*
 * We keep mod/ref flags in struct vm_page->pg_flags.
 */
#define PG_PMAP_MOD	PG_PMAP0
#define PG_PMAP_REF	PG_PMAP1
#define PG_PMAP_WC	PG_PMAP2

/*
 * For each managed physical page we maintain a list of <PMAP,VA>
 * pairs which it is mapped at.
 */
struct pv_entry {			/* locked by its list's pvh_lock */
	struct pv_entry *pv_next;	/* next entry */
	struct pmap *pv_pmap;		/* the pmap */
	vaddr_t pv_va;			/* the virtual address */
	struct vm_page *pv_ptp;		/* the vm_page of the PTP */
};

/*
 * pmap_remove_record: a record of VAs that have been unmapped, used to
 * flush the TLB.  If we have more than PMAP_RR_MAX, we stop recording.
 */

#define PMAP_RR_MAX	16	/* max of 16 pages (64K) */

struct pmap_remove_record {
	int prr_npages;
	vaddr_t prr_vas[PMAP_RR_MAX];
};

/*
 * Global kernel variables
 */

/* PTDpaddr: is the physical address of the kernel's PDP */
extern u_long PTDpaddr;

extern struct pmap kernel_pmap_store;	/* kernel pmap */
extern int pmap_pg_g;			/* do we support PG_G? */

extern paddr_t ptp_masks[];
extern int ptp_shifts[];
extern long nkptp[], nbpd[], nkptpmax[];
extern pd_entry_t *pdes[];

/*
 * Macros
 */

#define pmap_kernel()			(&kernel_pmap_store)
#define pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
#define pmap_update(pmap)		/* nothing (yet) */

#define pmap_clear_modify(pg)		pmap_clear_attrs(pg, PG_M)
#define pmap_clear_reference(pg)	pmap_clear_attrs(pg, PG_U)
#define pmap_copy(DP,SP,D,L,S)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#define pmap_move(DP,SP,D,L,S)
#define pmap_phys_address(ppn)		ptoa(ppn)
#define pmap_valid_entry(E)		((E) & PG_V) /* is PDE or PTE valid? */

#define pmap_proc_iflush(p,va,len)	/* nothing */
#define pmap_unuse_final(p)		/* nothing */
#define pmap_remove_holes(map)		do { /* nothing */ } while (0)


/*
 * Prototypes
 */

paddr_t pmap_bootstrap(paddr_t, paddr_t);
boolean_t pmap_clear_attrs(struct vm_page *, unsigned long);
static void pmap_page_protect(struct vm_page *, vm_prot_t);
void pmap_page_remove(struct vm_page *);
static void pmap_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void pmap_remove(struct pmap *, vaddr_t, vaddr_t);
boolean_t pmap_test_attrs(struct vm_page *, unsigned);
static void pmap_update_pg(vaddr_t);
static void pmap_update_2pg(vaddr_t, vaddr_t);
void pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);

vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */

void pmap_tlb_shootpage(struct pmap *, vaddr_t);
void pmap_tlb_shootrange(struct pmap *, vaddr_t, vaddr_t);
void pmap_tlb_shoottlb(void);
#ifdef MULTIPROCESSOR
void pmap_tlb_shootwait(void);
#else
#define pmap_tlb_shootwait()
#endif

paddr_t pmap_prealloc_lowmem_ptps(paddr_t);

void pagezero(vaddr_t);

/*
 * Functions for flushing the cache for vaddrs and pages.
 * These functions are not part of the MI pmap interface and thus
 * should not be used as such.
 */
void pmap_flush_cache(vaddr_t, vsize_t);
#define pmap_flush_page(paddr) do {					\
	KDASSERT(PHYS_TO_VM_PAGE(paddr) != NULL);			\
	pmap_flush_cache(PMAP_DIRECT_MAP(paddr), PAGE_SIZE);		\
} while (/* CONSTCOND */ 0)

#define PMAP_STEAL_MEMORY	/* enable pmap_steal_memory() */
#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/*
 * Do idle page zero'ing uncached to avoid polluting the cache.
 */
boolean_t pmap_pageidlezero(struct vm_page *);
#define PMAP_PAGEIDLEZERO(pg)	pmap_pageidlezero((pg))

/*
 * Inline functions
 */

static __inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 * if hardware doesn't support one-page flushing)
 */

static __inline void
pmap_update_pg(vaddr_t va)
{
	invlpg(va);
}

/*
 * pmap_update_2pg: flush two pages from the TLB
 */

static __inline void
pmap_update_2pg(vaddr_t va, vaddr_t vb)
{
	invlpg(va);
	invlpg(vb);
}

/*
 * pmap_page_protect: change the protection of all recorded mappings
 * of a managed page
 *
 * => this function is a frontend for pmap_page_remove/pmap_clear_attrs
 * => we only have to worry about making the page more protected;
 *    unprotecting a page is done on-demand at fault time.
 */

static __inline void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_clear_attrs(pg, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only have to worry about making the page more protected;
 *    unprotecting a page is done on-demand at fault time.
 */

static __inline void
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}

/*
 * Various address inlines
 *
 *  vtopte: return a pointer to the PTE mapping a VA.  Works only for
 *  user and PT addresses.
 *
 *  kvtopte: return a pointer to the PTE mapping a kernel VA.
 */

static __inline pt_entry_t *
vtopte(vaddr_t va)
{
	return (PTE_BASE + pl1_i(va));
}

static __inline pt_entry_t *
kvtopte(vaddr_t va)
{
#ifdef LARGEPAGES
	{
		pd_entry_t *pde;

		pde = L2_BASE + pl2_i(va);
		if (*pde & PG_PS)
			return ((pt_entry_t *)pde);
	}
#endif

	return (PTE_BASE + pl1_i(va));
}

#define pmap_pte_set(p, n)		x86_atomic_testset_u64(p, n)
#define pmap_pte_clearbits(p, b)	x86_atomic_clearbits_u64(p, b)
#define pmap_pte_setbits(p, b)		x86_atomic_setbits_u64(p, b)
#define pmap_cpu_has_pg_n()		(1)
#define pmap_cpu_has_invlpg		(1)

#define PMAP_DIRECT_MAP(pa)	((vaddr_t)PMAP_DIRECT_BASE + (pa))
#define PMAP_DIRECT_UNMAP(va)	((paddr_t)(va) - PMAP_DIRECT_BASE)
#define pmap_map_direct(pg)	PMAP_DIRECT_MAP(VM_PAGE_TO_PHYS(pg))
#define pmap_unmap_direct(va)	PHYS_TO_VM_PAGE(PMAP_DIRECT_UNMAP(va))

#define __HAVE_PMAP_DIRECT
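
/*
 * Example (editor's illustration): the direct map provides a linear,
 * always-present window onto physical memory, so a page can be
 * touched without entering a temporary mapping, e.g.:
 *
 *	vaddr_t va = pmap_map_direct(pg);
 *	memset((void *)va, 0, PAGE_SIZE);
 */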

#endif /* _KERNEL && !_LOCORE */
#endif /* _AMD64_PMAP_H_ */