/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018 Matthew Macy
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/bitstring.h>
#include <sys/queue.h>
#include <sys/cpuset.h>
#include <sys/endian.h>
#include <sys/kerneldump.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/syslog.h>
#include <sys/msgbuf.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmem.h>
#include <sys/vmmeter.h>
#include <sys/smp.h>

#include <sys/kdb.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_reserv.h>
#include <vm/vm_dumpset.h>
#include <vm/uma.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/platform.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/bat.h>
#include <machine/hid.h>
#include <machine/pte.h>
#include <machine/sr.h>
#include <machine/trap.h>
#include <machine/mmuvar.h>

/* For pseries bit.
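 * The header below provides the hypervisor-call interface used when running
 * as a PAPR (pseries) guest, where some MMU setup must go through hypervisor
 * calls rather than the direct register/table writes available on bare-metal
 * PowerNV; powernv_enabled, defined later in this file, distinguishes the two
 * environments.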
 */
#include <powerpc/pseries/phyp-hvcall.h>

#ifdef INVARIANTS
#include <vm/uma_dbg.h>
#endif

#define PPC_BITLSHIFT(bit)  (sizeof(long)*NBBY - 1 - (bit))
#define PPC_BIT(bit)        (1UL << PPC_BITLSHIFT(bit))
#define PPC_BITLSHIFT_VAL(val, bit) ((val) << PPC_BITLSHIFT(bit))

#include "opt_ddb.h"

#ifdef DDB
static void pmap_pte_walk(pml1_entry_t *l1, vm_offset_t va);
#endif

#define PG_W            RPTE_WIRED
#define PG_V            RPTE_VALID
#define PG_MANAGED      RPTE_MANAGED
#define PG_PROMOTED     RPTE_PROMOTED
#define PG_M            RPTE_C
#define PG_A            RPTE_R
#define PG_X            RPTE_EAA_X
#define PG_RW           RPTE_EAA_W
#define PG_PTE_CACHE    RPTE_ATTR_MASK

#define RPTE_SHIFT      9
#define NLS_MASK        ((1UL<<5)-1)
#define RPTE_ENTRIES    (1UL<<RPTE_SHIFT)
#define RPTE_MASK       (RPTE_ENTRIES-1)

#define NLB_SHIFT       0
#define NLB_MASK        (((1UL<<52)-1) << 8)

extern int nkpt;
extern caddr_t crashdumpmap;

#define RIC_FLUSH_TLB   0
#define RIC_FLUSH_PWC   1
#define RIC_FLUSH_ALL   2

#define POWER9_TLB_SETS_RADIX   128     /* # sets in POWER9 TLB Radix mode */

#define PPC_INST_TLBIE  0x7c000264
#define PPC_INST_TLBIEL 0x7c000224
#define PPC_INST_SLBIA  0x7c0003e4

#define ___PPC_RA(a)    (((a) & 0x1f) << 16)
#define ___PPC_RB(b)    (((b) & 0x1f) << 11)
#define ___PPC_RS(s)    (((s) & 0x1f) << 21)
#define ___PPC_RT(t)    ___PPC_RS(t)
#define ___PPC_R(r)     (((r) & 0x1) << 16)
#define ___PPC_PRS(prs) (((prs) & 0x1) << 17)
#define ___PPC_RIC(ric) (((ric) & 0x3) << 18)

#define PPC_SLBIA(IH)   __XSTRING(.long PPC_INST_SLBIA | \
                                ((IH & 0x7) << 21))
#define PPC_TLBIE_5(rb,rs,ric,prs,r)                    \
        __XSTRING(.long PPC_INST_TLBIE |                \
                  ___PPC_RB(rb) | ___PPC_RS(rs) |       \
                  ___PPC_RIC(ric) | ___PPC_PRS(prs) |   \
                  ___PPC_R(r))

#define PPC_TLBIEL(rb,rs,ric,prs,r)                     \
        __XSTRING(.long PPC_INST_TLBIEL |               \
                  ___PPC_RB(rb) | ___PPC_RS(rs) |       \
                  ___PPC_RIC(ric) | ___PPC_PRS(prs) |   \
                  ___PPC_R(r))

#define PPC_INVALIDATE_ERAT     PPC_SLBIA(7)

static __inline void
ttusync(void)
{
    __asm __volatile("eieio; tlbsync; ptesync" ::: "memory");
}

#define TLBIEL_INVAL_SEL_MASK   0xc00   /* invalidation selector */
#define TLBIEL_INVAL_PAGE       0x000   /* invalidate a single page */
#define TLBIEL_INVAL_SET_PID    0x400   /* invalidate a set for the current PID */
#define TLBIEL_INVAL_SET_LPID   0x800   /* invalidate a set for current LPID */
#define TLBIEL_INVAL_SET        0xc00   /* invalidate a set for all LPIDs */

#define TLBIE_ACTUAL_PAGE_MASK  0xe0
#define TLBIE_ACTUAL_PAGE_4K    0x00
#define TLBIE_ACTUAL_PAGE_64K   0xa0
#define TLBIE_ACTUAL_PAGE_2M    0x20
#define TLBIE_ACTUAL_PAGE_1G    0x40

#define TLBIE_PRS_PARTITION_SCOPE       0x0
#define TLBIE_PRS_PROCESS_SCOPE         0x1

#define TLBIE_RIC_INVALIDATE_TLB        0x0     /* Invalidate just TLB */
#define TLBIE_RIC_INVALIDATE_PWC        0x1     /* Invalidate just PWC */
#define TLBIE_RIC_INVALIDATE_ALL        0x2     /* Invalidate TLB, PWC,
                                                 * cached {proc, part}tab entries
                                                 */
#define TLBIE_RIC_INVALIDATE_SEQ        0x3     /* HPT - only:
                                                 * Invalidate a range of translations
                                                 */

static __always_inline void
radix_tlbie(uint8_t ric, uint8_t prs, uint16_t is, uint32_t pid, uint32_t lpid,
    vm_offset_t va, uint16_t ap)
{
    uint64_t rb, rs;

    MPASS((va & PAGE_MASK) == 0);

    rs = ((uint64_t)pid << 32) | lpid;
    rb = va | is | ap;
    __asm __volatile(PPC_TLBIE_5(%0, %1, %2, %3, 1) : :
        "r" (rb), "r" (rs), "i" (ric), "i" (prs) : "memory");
}

static __inline void
radix_tlbie_fixup(uint32_t pid, vm_offset_t va, int ap)
{

    __asm __volatile("ptesync" ::: "memory");
    radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
        TLBIEL_INVAL_PAGE, 0, 0, va, ap);
    __asm __volatile("ptesync" ::: "memory");
    radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
        TLBIEL_INVAL_PAGE, pid, 0, va, ap);
}

static __inline void
radix_tlbie_invlpg_user_4k(uint32_t pid, vm_offset_t va)
{

    radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
        TLBIEL_INVAL_PAGE, pid, 0, va, TLBIE_ACTUAL_PAGE_4K);
    radix_tlbie_fixup(pid, va, TLBIE_ACTUAL_PAGE_4K);
}

static __inline void
radix_tlbie_invlpg_user_2m(uint32_t pid, vm_offset_t va)
{

    radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
        TLBIEL_INVAL_PAGE, pid, 0, va, TLBIE_ACTUAL_PAGE_2M);
    radix_tlbie_fixup(pid, va, TLBIE_ACTUAL_PAGE_2M);
}

static __inline void
radix_tlbie_invlpwc_user(uint32_t pid)
{

    radix_tlbie(TLBIE_RIC_INVALIDATE_PWC, TLBIE_PRS_PROCESS_SCOPE,
        TLBIEL_INVAL_SET_PID, pid, 0, 0, 0);
}

static __inline void
radix_tlbie_flush_user(uint32_t pid)
{

    radix_tlbie(TLBIE_RIC_INVALIDATE_ALL, TLBIE_PRS_PROCESS_SCOPE,
        TLBIEL_INVAL_SET_PID, pid, 0, 0, 0);
}

static __inline void
radix_tlbie_invlpg_kernel_4k(vm_offset_t va)
{

    radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
        TLBIEL_INVAL_PAGE, 0, 0, va, TLBIE_ACTUAL_PAGE_4K);
    radix_tlbie_fixup(0, va, TLBIE_ACTUAL_PAGE_4K);
}

static __inline void
radix_tlbie_invlpg_kernel_2m(vm_offset_t va)
{

    radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
        TLBIEL_INVAL_PAGE, 0, 0, va, TLBIE_ACTUAL_PAGE_2M);
    radix_tlbie_fixup(0, va, TLBIE_ACTUAL_PAGE_2M);
}

/* 1GB pages aren't currently supported.
 */
static __inline __unused void
radix_tlbie_invlpg_kernel_1g(vm_offset_t va)
{

    radix_tlbie(TLBIE_RIC_INVALIDATE_TLB, TLBIE_PRS_PROCESS_SCOPE,
        TLBIEL_INVAL_PAGE, 0, 0, va, TLBIE_ACTUAL_PAGE_1G);
    radix_tlbie_fixup(0, va, TLBIE_ACTUAL_PAGE_1G);
}

static __inline void
radix_tlbie_invlpwc_kernel(void)
{

    radix_tlbie(TLBIE_RIC_INVALIDATE_PWC, TLBIE_PRS_PROCESS_SCOPE,
        TLBIEL_INVAL_SET_LPID, 0, 0, 0, 0);
}

static __inline void
radix_tlbie_flush_kernel(void)
{

    radix_tlbie(TLBIE_RIC_INVALIDATE_ALL, TLBIE_PRS_PROCESS_SCOPE,
        TLBIEL_INVAL_SET_LPID, 0, 0, 0, 0);
}

static __inline vm_pindex_t
pmap_l3e_pindex(vm_offset_t va)
{
    return ((va & PG_FRAME) >> L3_PAGE_SIZE_SHIFT);
}

static __inline vm_pindex_t
pmap_pml3e_index(vm_offset_t va)
{

    return ((va >> L3_PAGE_SIZE_SHIFT) & RPTE_MASK);
}

static __inline vm_pindex_t
pmap_pml2e_index(vm_offset_t va)
{
    return ((va >> L2_PAGE_SIZE_SHIFT) & RPTE_MASK);
}

static __inline vm_pindex_t
pmap_pml1e_index(vm_offset_t va)
{
    return ((va & PG_FRAME) >> L1_PAGE_SIZE_SHIFT);
}

/* Return various clipped indexes for a given VA */
static __inline vm_pindex_t
pmap_pte_index(vm_offset_t va)
{

    return ((va >> PAGE_SHIFT) & RPTE_MASK);
}

/* Return a pointer to the PT slot that corresponds to a VA */
static __inline pt_entry_t *
pmap_l3e_to_pte(pt_entry_t *l3e, vm_offset_t va)
{
    pt_entry_t *pte;
    vm_paddr_t ptepa;

    ptepa = (be64toh(*l3e) & NLB_MASK);
    pte = (pt_entry_t *)PHYS_TO_DMAP(ptepa);
    return (&pte[pmap_pte_index(va)]);
}

/* Return a pointer to the PD slot that corresponds to a VA */
static __inline pt_entry_t *
pmap_l2e_to_l3e(pt_entry_t *l2e, vm_offset_t va)
{
    pt_entry_t *l3e;
    vm_paddr_t l3pa;

    l3pa = (be64toh(*l2e) & NLB_MASK);
    l3e = (pml3_entry_t *)PHYS_TO_DMAP(l3pa);
    return (&l3e[pmap_pml3e_index(va)]);
}

/* Return a pointer to the PD slot that corresponds to a VA */
static __inline pt_entry_t *
pmap_l1e_to_l2e(pt_entry_t *l1e, vm_offset_t va)
{
    pt_entry_t *l2e;
    vm_paddr_t l2pa;

    l2pa = (be64toh(*l1e) & NLB_MASK);

    l2e = (pml2_entry_t *)PHYS_TO_DMAP(l2pa);
    return (&l2e[pmap_pml2e_index(va)]);
}

static __inline pml1_entry_t *
pmap_pml1e(pmap_t pmap, vm_offset_t va)
{

    return (&pmap->pm_pml1[pmap_pml1e_index(va)]);
}

static pt_entry_t *
pmap_pml2e(pmap_t pmap, vm_offset_t va)
{
    pt_entry_t *l1e;

    l1e = pmap_pml1e(pmap, va);
    if (l1e == NULL || (be64toh(*l1e) & RPTE_VALID) == 0)
        return (NULL);
    return (pmap_l1e_to_l2e(l1e, va));
}

static __inline pt_entry_t *
pmap_pml3e(pmap_t pmap, vm_offset_t va)
{
    pt_entry_t *l2e;

    l2e = pmap_pml2e(pmap, va);
    if (l2e == NULL || (be64toh(*l2e) & RPTE_VALID) == 0)
        return (NULL);
    return (pmap_l2e_to_l3e(l2e, va));
}

static __inline pt_entry_t *
pmap_pte(pmap_t pmap, vm_offset_t va)
{
    pt_entry_t *l3e;

    l3e = pmap_pml3e(pmap, va);
    if (l3e == NULL || (be64toh(*l3e) & RPTE_VALID) == 0)
        return (NULL);
    return (pmap_l3e_to_pte(l3e, va));
}

int nkpt = 64;
SYSCTL_INT(_machdep, OID_AUTO, nkpt, CTLFLAG_RD, &nkpt, 0,
    "Number of kernel page table pages allocated on bootup");

vm_paddr_t dmaplimit;

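/*
 * Note on the walk helpers above: pmap_pml2e(), pmap_pml3e(), and pmap_pte()
 * return NULL as soon as a higher-level entry is missing or invalid, so
 * callers must both check the returned pointer and test the entry itself for
 * RPTE_VALID.
 */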
SYSCTL_DECL(_vm_pmap); 412 413 #ifdef INVARIANTS 414 #define VERBOSE_PMAP 0 415 #define VERBOSE_PROTECT 0 416 static int pmap_logging; 417 SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_logging, CTLFLAG_RWTUN, 418 &pmap_logging, 0, "verbose debug logging"); 419 #endif 420 421 static u_int64_t KPTphys; /* phys addr of kernel level 1 */ 422 423 //static vm_paddr_t KERNend; /* phys addr of end of bootstrap data */ 424 425 static vm_offset_t qframe = 0; 426 static struct mtx qframe_mtx; 427 428 void mmu_radix_activate(struct thread *); 429 void mmu_radix_advise(pmap_t, vm_offset_t, vm_offset_t, int); 430 void mmu_radix_align_superpage(vm_object_t, vm_ooffset_t, vm_offset_t *, 431 vm_size_t); 432 void mmu_radix_clear_modify(vm_page_t); 433 void mmu_radix_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t); 434 int mmu_radix_decode_kernel_ptr(vm_offset_t, int *, vm_offset_t *); 435 int mmu_radix_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int, int8_t); 436 void mmu_radix_enter_object(pmap_t, vm_offset_t, vm_offset_t, vm_page_t, 437 vm_prot_t); 438 void mmu_radix_enter_quick(pmap_t, vm_offset_t, vm_page_t, vm_prot_t); 439 vm_paddr_t mmu_radix_extract(pmap_t pmap, vm_offset_t va); 440 vm_page_t mmu_radix_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t); 441 void mmu_radix_kenter(vm_offset_t, vm_paddr_t); 442 vm_paddr_t mmu_radix_kextract(vm_offset_t); 443 void mmu_radix_kremove(vm_offset_t); 444 boolean_t mmu_radix_is_modified(vm_page_t); 445 boolean_t mmu_radix_is_prefaultable(pmap_t, vm_offset_t); 446 boolean_t mmu_radix_is_referenced(vm_page_t); 447 void mmu_radix_object_init_pt(pmap_t, vm_offset_t, vm_object_t, 448 vm_pindex_t, vm_size_t); 449 boolean_t mmu_radix_page_exists_quick(pmap_t, vm_page_t); 450 void mmu_radix_page_init(vm_page_t); 451 boolean_t mmu_radix_page_is_mapped(vm_page_t m); 452 void mmu_radix_page_set_memattr(vm_page_t, vm_memattr_t); 453 int mmu_radix_page_wired_mappings(vm_page_t); 454 int mmu_radix_pinit(pmap_t); 455 void mmu_radix_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); 456 bool mmu_radix_ps_enabled(pmap_t); 457 void mmu_radix_qenter(vm_offset_t, vm_page_t *, int); 458 void mmu_radix_qremove(vm_offset_t, int); 459 vm_offset_t mmu_radix_quick_enter_page(vm_page_t); 460 void mmu_radix_quick_remove_page(vm_offset_t); 461 boolean_t mmu_radix_ts_referenced(vm_page_t); 462 void mmu_radix_release(pmap_t); 463 void mmu_radix_remove(pmap_t, vm_offset_t, vm_offset_t); 464 void mmu_radix_remove_all(vm_page_t); 465 void mmu_radix_remove_pages(pmap_t); 466 void mmu_radix_remove_write(vm_page_t); 467 void mmu_radix_unwire(pmap_t, vm_offset_t, vm_offset_t); 468 void mmu_radix_zero_page(vm_page_t); 469 void mmu_radix_zero_page_area(vm_page_t, int, int); 470 int mmu_radix_change_attr(vm_offset_t, vm_size_t, vm_memattr_t); 471 void mmu_radix_page_array_startup(long pages); 472 473 #include "mmu_oea64.h" 474 475 /* 476 * Kernel MMU interface 477 */ 478 479 static void mmu_radix_bootstrap(vm_offset_t, vm_offset_t); 480 481 static void mmu_radix_copy_page(vm_page_t, vm_page_t); 482 static void mmu_radix_copy_pages(vm_page_t *ma, vm_offset_t a_offset, 483 vm_page_t *mb, vm_offset_t b_offset, int xfersize); 484 static void mmu_radix_growkernel(vm_offset_t); 485 static void mmu_radix_init(void); 486 static int mmu_radix_mincore(pmap_t, vm_offset_t, vm_paddr_t *); 487 static vm_offset_t mmu_radix_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int); 488 static void mmu_radix_pinit0(pmap_t); 489 490 static void *mmu_radix_mapdev(vm_paddr_t, vm_size_t); 491 static void 
*mmu_radix_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t); 492 static void mmu_radix_unmapdev(vm_offset_t, vm_size_t); 493 static void mmu_radix_kenter_attr(vm_offset_t, vm_paddr_t, vm_memattr_t ma); 494 static boolean_t mmu_radix_dev_direct_mapped(vm_paddr_t, vm_size_t); 495 static void mmu_radix_dumpsys_map(vm_paddr_t pa, size_t sz, void **va); 496 static void mmu_radix_scan_init(void); 497 static void mmu_radix_cpu_bootstrap(int ap); 498 static void mmu_radix_tlbie_all(void); 499 500 static struct pmap_funcs mmu_radix_methods = { 501 .bootstrap = mmu_radix_bootstrap, 502 .copy_page = mmu_radix_copy_page, 503 .copy_pages = mmu_radix_copy_pages, 504 .cpu_bootstrap = mmu_radix_cpu_bootstrap, 505 .growkernel = mmu_radix_growkernel, 506 .init = mmu_radix_init, 507 .map = mmu_radix_map, 508 .mincore = mmu_radix_mincore, 509 .pinit = mmu_radix_pinit, 510 .pinit0 = mmu_radix_pinit0, 511 512 .mapdev = mmu_radix_mapdev, 513 .mapdev_attr = mmu_radix_mapdev_attr, 514 .unmapdev = mmu_radix_unmapdev, 515 .kenter_attr = mmu_radix_kenter_attr, 516 .dev_direct_mapped = mmu_radix_dev_direct_mapped, 517 .dumpsys_pa_init = mmu_radix_scan_init, 518 .dumpsys_map_chunk = mmu_radix_dumpsys_map, 519 .page_is_mapped = mmu_radix_page_is_mapped, 520 .ps_enabled = mmu_radix_ps_enabled, 521 .align_superpage = mmu_radix_align_superpage, 522 .object_init_pt = mmu_radix_object_init_pt, 523 .protect = mmu_radix_protect, 524 /* pmap dispatcher interface */ 525 .clear_modify = mmu_radix_clear_modify, 526 .copy = mmu_radix_copy, 527 .enter = mmu_radix_enter, 528 .enter_object = mmu_radix_enter_object, 529 .enter_quick = mmu_radix_enter_quick, 530 .extract = mmu_radix_extract, 531 .extract_and_hold = mmu_radix_extract_and_hold, 532 .is_modified = mmu_radix_is_modified, 533 .is_prefaultable = mmu_radix_is_prefaultable, 534 .is_referenced = mmu_radix_is_referenced, 535 .ts_referenced = mmu_radix_ts_referenced, 536 .page_exists_quick = mmu_radix_page_exists_quick, 537 .page_init = mmu_radix_page_init, 538 .page_wired_mappings = mmu_radix_page_wired_mappings, 539 .qenter = mmu_radix_qenter, 540 .qremove = mmu_radix_qremove, 541 .release = mmu_radix_release, 542 .remove = mmu_radix_remove, 543 .remove_all = mmu_radix_remove_all, 544 .remove_write = mmu_radix_remove_write, 545 .unwire = mmu_radix_unwire, 546 .zero_page = mmu_radix_zero_page, 547 .zero_page_area = mmu_radix_zero_page_area, 548 .activate = mmu_radix_activate, 549 .quick_enter_page = mmu_radix_quick_enter_page, 550 .quick_remove_page = mmu_radix_quick_remove_page, 551 .page_set_memattr = mmu_radix_page_set_memattr, 552 .page_array_startup = mmu_radix_page_array_startup, 553 554 /* Internal interfaces */ 555 .kenter = mmu_radix_kenter, 556 .kextract = mmu_radix_kextract, 557 .kremove = mmu_radix_kremove, 558 .change_attr = mmu_radix_change_attr, 559 .decode_kernel_ptr = mmu_radix_decode_kernel_ptr, 560 561 .tlbie_all = mmu_radix_tlbie_all, 562 }; 563 564 MMU_DEF(mmu_radix, MMU_TYPE_RADIX, mmu_radix_methods); 565 566 static boolean_t pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va, 567 struct rwlock **lockp); 568 static boolean_t pmap_demote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va); 569 static int pmap_unuse_pt(pmap_t, vm_offset_t, pml3_entry_t, struct spglist *); 570 static int pmap_remove_l3e(pmap_t pmap, pml3_entry_t *pdq, vm_offset_t sva, 571 struct spglist *free, struct rwlock **lockp); 572 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva, 573 pml3_entry_t ptepde, struct spglist *free, struct rwlock 
**lockp); 574 static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va); 575 static bool pmap_remove_page(pmap_t pmap, vm_offset_t va, pml3_entry_t *pde, 576 struct spglist *free); 577 static bool pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 578 pml3_entry_t *l3e, struct spglist *free, struct rwlock **lockp); 579 580 static bool pmap_pv_insert_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t l3e, 581 u_int flags, struct rwlock **lockp); 582 #if VM_NRESERVLEVEL > 0 583 static void pmap_pv_promote_l3e(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, 584 struct rwlock **lockp); 585 #endif 586 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va); 587 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte); 588 static vm_page_t mmu_radix_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, 589 vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp, bool *invalidate); 590 591 static bool pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, 592 vm_prot_t prot, struct rwlock **lockp); 593 static int pmap_enter_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t newpde, 594 u_int flags, vm_page_t m, struct rwlock **lockp); 595 596 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp); 597 static void free_pv_chunk(struct pv_chunk *pc); 598 static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp); 599 static vm_page_t pmap_allocl3e(pmap_t pmap, vm_offset_t va, 600 struct rwlock **lockp); 601 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, 602 struct rwlock **lockp); 603 static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, 604 struct spglist *free); 605 static boolean_t pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free); 606 607 static void pmap_invalidate_page(pmap_t pmap, vm_offset_t start); 608 static void pmap_invalidate_all(pmap_t pmap); 609 static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool flush); 610 611 /* 612 * Internal flags for pmap_enter()'s helper functions. 613 */ 614 #define PMAP_ENTER_NORECLAIM 0x1000000 /* Don't reclaim PV entries. */ 615 #define PMAP_ENTER_NOREPLACE 0x2000000 /* Don't replace mappings. */ 616 617 #define UNIMPLEMENTED() panic("%s not implemented", __func__) 618 #define UNTESTED() panic("%s not yet tested", __func__) 619 620 /* Number of supported PID bits */ 621 static unsigned int isa3_pid_bits; 622 623 /* PID to start allocating from */ 624 static unsigned int isa3_base_pid; 625 626 #define PROCTAB_SIZE_SHIFT (isa3_pid_bits + 4) 627 #define PROCTAB_ENTRIES (1ul << isa3_pid_bits) 628 629 /* 630 * Map of physical memory regions. 
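 * (pregions is the installed-memory map reported by firmware and is used to
 * build the direct map; regions is the available-memory map used to construct
 * phys_avail during early bootstrap.)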
 */
static struct mem_region *regions, *pregions;
static struct numa_mem_region *numa_pregions;
static u_int phys_avail_count;
static int regions_sz, pregions_sz, numa_pregions_sz;
static struct pate *isa3_parttab;
static struct prte *isa3_proctab;
static vmem_t *asid_arena;

extern void bs_remap_earlyboot(void);

#define RADIX_PGD_SIZE_SHIFT    16
#define RADIX_PGD_SIZE          (1UL << RADIX_PGD_SIZE_SHIFT)

#define RADIX_PGD_INDEX_SHIFT   (RADIX_PGD_SIZE_SHIFT-3)
#define NL2EPG                  (PAGE_SIZE/sizeof(pml2_entry_t))
#define NL3EPG                  (PAGE_SIZE/sizeof(pml3_entry_t))

#define NUPML1E (RADIX_PGD_SIZE/sizeof(uint64_t))       /* number of userland PML1 pages */
#define NUPDPE  (NUPML1E * NL2EPG)                      /* number of userland PDP pages */
#define NUPDE   (NUPDPE * NL3EPG)                       /* number of userland PD entries */

/* POWER9 only permits a 64k partition table size. */
#define PARTTAB_SIZE_SHIFT      16
#define PARTTAB_SIZE            (1UL << PARTTAB_SIZE_SHIFT)

#define PARTTAB_HR              (1UL << 63)     /* host uses radix */
#define PARTTAB_GR              (1UL << 63)     /* guest uses radix must match host */

/* TLB flush actions. Used as argument to mmu_radix_tlbiel_flush() */
enum {
    TLB_INVAL_SCOPE_LPID = 2,   /* invalidate TLBs for current LPID */
    TLB_INVAL_SCOPE_GLOBAL = 3, /* invalidate all TLBs */
};

#define NPV_LIST_LOCKS  MAXCPU

static int pmap_initialized;
static vm_paddr_t proctab0pa;
static vm_paddr_t parttab_phys;
CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);

/*
 * Data for the pv entry allocation mechanism.
 * Updates to pv_invl_gen are protected by the pv_list_locks[]
 * elements, but reads are not.
 */
static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
static struct mtx __exclusive_cache_line pv_chunks_mutex;
static struct rwlock __exclusive_cache_line pv_list_locks[NPV_LIST_LOCKS];
static struct md_page *pv_table;
static struct md_page pv_dummy;

#ifdef PV_STATS
#define PV_STAT(x)      do { x ; } while (0)
#else
#define PV_STAT(x)      do { } while (0)
#endif

#define pa_radix_index(pa)      ((pa) >> L3_PAGE_SIZE_SHIFT)
#define pa_to_pvh(pa)           (&pv_table[pa_radix_index(pa)])

#define PHYS_TO_PV_LIST_LOCK(pa)        \
            (&pv_list_locks[pa_radix_index(pa) % NPV_LIST_LOCKS])

#define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa)  do {    \
    struct rwlock **_lockp = (lockp);                   \
    struct rwlock *_new_lock;                           \
                                                        \
    _new_lock = PHYS_TO_PV_LIST_LOCK(pa);               \
    if (_new_lock != *_lockp) {                         \
        if (*_lockp != NULL)                            \
            rw_wunlock(*_lockp);                        \
        *_lockp = _new_lock;                            \
        rw_wlock(*_lockp);                              \
    }                                                   \
} while (0)

#define CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m)        \
            CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))

#define RELEASE_PV_LIST_LOCK(lockp)             do {    \
    struct rwlock **_lockp = (lockp);                   \
                                                        \
    if (*_lockp != NULL) {                              \
        rw_wunlock(*_lockp);                            \
        *_lockp = NULL;                                 \
    }                                                   \
} while (0)

#define VM_PAGE_TO_PV_LIST_LOCK(m)      \
    PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))

/*
 * We support a 52-bit virtual address space, hence RTS = 52 - 31 = 21
 * (0b10101).
 * RTS encoding details:
 * bits 0 - 2 of rts -> bits 5 - 7 of the unsigned long
 * bits 3 - 4 of rts -> bits 61 - 62 of the unsigned long
 */
#define RTS_SIZE ((0x2UL << 61) | (0x5UL << 5))

static int powernv_enabled = 1;

static __always_inline void
tlbiel_radix_set_isa300(uint32_t set, uint32_t is,
    uint32_t pid, uint32_t ric,
 uint32_t prs)
{
    uint64_t rb;
    uint64_t rs;

    rb = PPC_BITLSHIFT_VAL(set, 51) | PPC_BITLSHIFT_VAL(is, 53);
    rs = PPC_BITLSHIFT_VAL((uint64_t)pid, 31);

    __asm __volatile(PPC_TLBIEL(%0, %1, %2, %3, 1)
        : : "r"(rb), "r"(rs), "i"(ric), "i"(prs)
        : "memory");
}

static void
tlbiel_flush_isa3(uint32_t num_sets, uint32_t is)
{
    uint32_t set;

    __asm __volatile("ptesync": : :"memory");

    /*
     * Flush the first set of the TLB, and the entire Page Walk Cache
     * and partition table entries. Then flush the remaining sets of the
     * TLB.
     */
    if (is == TLB_INVAL_SCOPE_GLOBAL) {
        tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0);
        for (set = 1; set < num_sets; set++)
            tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 0);
    }

    /* Do the same for process scoped entries. */
    tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 1);
    for (set = 1; set < num_sets; set++)
        tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1);

    __asm __volatile("ptesync": : :"memory");
}

static void
mmu_radix_tlbiel_flush(int scope)
{
    MPASS(scope == TLB_INVAL_SCOPE_LPID ||
        scope == TLB_INVAL_SCOPE_GLOBAL);

    tlbiel_flush_isa3(POWER9_TLB_SETS_RADIX, scope);
    __asm __volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

static void
mmu_radix_tlbie_all(void)
{
    if (powernv_enabled)
        mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL);
    else
        mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_LPID);
}

static void
mmu_radix_init_amor(void)
{
    /*
     * In HV mode, we init AMOR (Authority Mask Override Register) so that
     * the hypervisor and guest can setup IAMR (Instruction Authority Mask
     * Register), enable key 0 and set it to 1.
     *
     * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
     */
    mtspr(SPR_AMOR, (3ul << 62));
}

static void
mmu_radix_init_iamr(void)
{
    /*
     * Radix always uses key0 of the IAMR to determine if an access is
     * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
     * fetch.
     */
    mtspr(SPR_IAMR, (1ul << 62));
}

static void
mmu_radix_pid_set(pmap_t pmap)
{

    mtspr(SPR_PID, pmap->pm_pid);
    isync();
}

/* Quick sort callout for comparing physical addresses.
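 * Used with qsort() when sorting the phys_avail array during early bootstrap.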
*/ 827 static int 828 pa_cmp(const void *a, const void *b) 829 { 830 const vm_paddr_t *pa = a, *pb = b; 831 832 if (*pa < *pb) 833 return (-1); 834 else if (*pa > *pb) 835 return (1); 836 else 837 return (0); 838 } 839 840 #define pte_load_store(ptep, pte) atomic_swap_long(ptep, pte) 841 #define pte_load_clear(ptep) atomic_swap_long(ptep, 0) 842 #define pte_store(ptep, pte) do { \ 843 MPASS((pte) & (RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_X)); \ 844 *(u_long *)(ptep) = htobe64((u_long)((pte) | PG_V | RPTE_LEAF)); \ 845 } while (0) 846 /* 847 * NB: should only be used for adding directories - not for direct mappings 848 */ 849 #define pde_store(ptep, pa) do { \ 850 *(u_long *)(ptep) = htobe64((u_long)(pa|RPTE_VALID|RPTE_SHIFT)); \ 851 } while (0) 852 853 #define pte_clear(ptep) do { \ 854 *(u_long *)(ptep) = (u_long)(0); \ 855 } while (0) 856 857 #define PMAP_PDE_SUPERPAGE (1 << 8) /* supports 2MB superpages */ 858 859 /* 860 * Promotion to a 2MB (PDE) page mapping requires that the corresponding 4KB 861 * (PTE) page mappings have identical settings for the following fields: 862 */ 863 #define PG_PTE_PROMOTE (PG_X | PG_MANAGED | PG_W | PG_PTE_CACHE | \ 864 PG_M | PG_A | RPTE_EAA_MASK | PG_V) 865 866 static __inline void 867 pmap_resident_count_inc(pmap_t pmap, int count) 868 { 869 870 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 871 pmap->pm_stats.resident_count += count; 872 } 873 874 static __inline void 875 pmap_resident_count_dec(pmap_t pmap, int count) 876 { 877 878 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 879 KASSERT(pmap->pm_stats.resident_count >= count, 880 ("pmap %p resident count underflow %ld %d", pmap, 881 pmap->pm_stats.resident_count, count)); 882 pmap->pm_stats.resident_count -= count; 883 } 884 885 static void 886 pagezero(vm_offset_t va) 887 { 888 va = trunc_page(va); 889 890 bzero((void *)va, PAGE_SIZE); 891 } 892 893 static uint64_t 894 allocpages(int n) 895 { 896 u_int64_t ret; 897 898 ret = moea64_bootstrap_alloc(n * PAGE_SIZE, PAGE_SIZE); 899 for (int i = 0; i < n; i++) 900 pagezero(PHYS_TO_DMAP(ret + i * PAGE_SIZE)); 901 return (ret); 902 } 903 904 static pt_entry_t * 905 kvtopte(vm_offset_t va) 906 { 907 pt_entry_t *l3e; 908 909 l3e = pmap_pml3e(kernel_pmap, va); 910 if ((be64toh(*l3e) & RPTE_VALID) == 0) 911 return (NULL); 912 return (pmap_l3e_to_pte(l3e, va)); 913 } 914 915 void 916 mmu_radix_kenter(vm_offset_t va, vm_paddr_t pa) 917 { 918 pt_entry_t *pte; 919 920 pte = kvtopte(va); 921 MPASS(pte != NULL); 922 *pte = htobe64(pa | RPTE_VALID | RPTE_LEAF | RPTE_EAA_R | \ 923 RPTE_EAA_W | RPTE_EAA_P | PG_M | PG_A); 924 } 925 926 bool 927 mmu_radix_ps_enabled(pmap_t pmap) 928 { 929 return (superpages_enabled && (pmap->pm_flags & PMAP_PDE_SUPERPAGE) != 0); 930 } 931 932 static pt_entry_t * 933 pmap_nofault_pte(pmap_t pmap, vm_offset_t va, int *is_l3e) 934 { 935 pml3_entry_t *l3e; 936 pt_entry_t *pte; 937 938 va &= PG_PS_FRAME; 939 l3e = pmap_pml3e(pmap, va); 940 if (l3e == NULL || (be64toh(*l3e) & PG_V) == 0) 941 return (NULL); 942 943 if (be64toh(*l3e) & RPTE_LEAF) { 944 *is_l3e = 1; 945 return (l3e); 946 } 947 *is_l3e = 0; 948 va &= PG_FRAME; 949 pte = pmap_l3e_to_pte(l3e, va); 950 if (pte == NULL || (be64toh(*pte) & PG_V) == 0) 951 return (NULL); 952 return (pte); 953 } 954 955 int 956 pmap_nofault(pmap_t pmap, vm_offset_t va, vm_prot_t flags) 957 { 958 pt_entry_t *pte; 959 pt_entry_t startpte, origpte, newpte; 960 vm_page_t m; 961 int is_l3e; 962 963 startpte = 0; 964 retry: 965 if ((pte = pmap_nofault_pte(pmap, va, &is_l3e)) == NULL) 966 return (KERN_INVALID_ADDRESS); 967 origpte = 
newpte = be64toh(*pte); 968 if (startpte == 0) { 969 startpte = origpte; 970 if (((flags & VM_PROT_WRITE) && (startpte & PG_M)) || 971 ((flags & VM_PROT_READ) && (startpte & PG_A))) { 972 pmap_invalidate_all(pmap); 973 #ifdef INVARIANTS 974 if (VERBOSE_PMAP || pmap_logging) 975 printf("%s(%p, %#lx, %#x) (%#lx) -- invalidate all\n", 976 __func__, pmap, va, flags, origpte); 977 #endif 978 return (KERN_FAILURE); 979 } 980 } 981 #ifdef INVARIANTS 982 if (VERBOSE_PMAP || pmap_logging) 983 printf("%s(%p, %#lx, %#x) (%#lx)\n", __func__, pmap, va, 984 flags, origpte); 985 #endif 986 PMAP_LOCK(pmap); 987 if ((pte = pmap_nofault_pte(pmap, va, &is_l3e)) == NULL || 988 be64toh(*pte) != origpte) { 989 PMAP_UNLOCK(pmap); 990 return (KERN_FAILURE); 991 } 992 m = PHYS_TO_VM_PAGE(newpte & PG_FRAME); 993 MPASS(m != NULL); 994 switch (flags) { 995 case VM_PROT_READ: 996 if ((newpte & (RPTE_EAA_R|RPTE_EAA_X)) == 0) 997 goto protfail; 998 newpte |= PG_A; 999 vm_page_aflag_set(m, PGA_REFERENCED); 1000 break; 1001 case VM_PROT_WRITE: 1002 if ((newpte & RPTE_EAA_W) == 0) 1003 goto protfail; 1004 if (is_l3e) 1005 goto protfail; 1006 newpte |= PG_M; 1007 vm_page_dirty(m); 1008 break; 1009 case VM_PROT_EXECUTE: 1010 if ((newpte & RPTE_EAA_X) == 0) 1011 goto protfail; 1012 newpte |= PG_A; 1013 vm_page_aflag_set(m, PGA_REFERENCED); 1014 break; 1015 } 1016 1017 if (!atomic_cmpset_long(pte, htobe64(origpte), htobe64(newpte))) 1018 goto retry; 1019 ptesync(); 1020 PMAP_UNLOCK(pmap); 1021 if (startpte == newpte) 1022 return (KERN_FAILURE); 1023 return (0); 1024 protfail: 1025 PMAP_UNLOCK(pmap); 1026 return (KERN_PROTECTION_FAILURE); 1027 } 1028 1029 /* 1030 * Returns TRUE if the given page is mapped individually or as part of 1031 * a 2mpage. Otherwise, returns FALSE. 1032 */ 1033 boolean_t 1034 mmu_radix_page_is_mapped(vm_page_t m) 1035 { 1036 struct rwlock *lock; 1037 boolean_t rv; 1038 1039 if ((m->oflags & VPO_UNMANAGED) != 0) 1040 return (FALSE); 1041 lock = VM_PAGE_TO_PV_LIST_LOCK(m); 1042 rw_rlock(lock); 1043 rv = !TAILQ_EMPTY(&m->md.pv_list) || 1044 ((m->flags & PG_FICTITIOUS) == 0 && 1045 !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list)); 1046 rw_runlock(lock); 1047 return (rv); 1048 } 1049 1050 /* 1051 * Determine the appropriate bits to set in a PTE or PDE for a specified 1052 * caching mode. 
 */
static int
pmap_cache_bits(vm_memattr_t ma)
{
    if (ma != VM_MEMATTR_DEFAULT) {
        switch (ma) {
        case VM_MEMATTR_UNCACHEABLE:
            return (RPTE_ATTR_GUARDEDIO);
        case VM_MEMATTR_CACHEABLE:
            return (RPTE_ATTR_MEM);
        case VM_MEMATTR_WRITE_BACK:
        case VM_MEMATTR_PREFETCHABLE:
        case VM_MEMATTR_WRITE_COMBINING:
            return (RPTE_ATTR_UNGUARDEDIO);
        }
    }
    return (0);
}

static void
pmap_invalidate_page(pmap_t pmap, vm_offset_t start)
{
    ptesync();
    if (pmap == kernel_pmap)
        radix_tlbie_invlpg_kernel_4k(start);
    else
        radix_tlbie_invlpg_user_4k(pmap->pm_pid, start);
    ttusync();
}

static void
pmap_invalidate_page_2m(pmap_t pmap, vm_offset_t start)
{
    ptesync();
    if (pmap == kernel_pmap)
        radix_tlbie_invlpg_kernel_2m(start);
    else
        radix_tlbie_invlpg_user_2m(pmap->pm_pid, start);
    ttusync();
}

static void
pmap_invalidate_pwc(pmap_t pmap)
{
    ptesync();
    if (pmap == kernel_pmap)
        radix_tlbie_invlpwc_kernel();
    else
        radix_tlbie_invlpwc_user(pmap->pm_pid);
    ttusync();
}

static void
pmap_invalidate_range(pmap_t pmap, vm_offset_t start, vm_offset_t end)
{
    /* Fall back to a full flush for ranges of more than 8 pages. */
    if (((end - start) >> PAGE_SHIFT) > 8) {
        pmap_invalidate_all(pmap);
        return;
    }
    ptesync();
    if (pmap == kernel_pmap) {
        while (start < end) {
            radix_tlbie_invlpg_kernel_4k(start);
            start += PAGE_SIZE;
        }
    } else {
        while (start < end) {
            radix_tlbie_invlpg_user_4k(pmap->pm_pid, start);
            start += PAGE_SIZE;
        }
    }
    ttusync();
}

static void
pmap_invalidate_all(pmap_t pmap)
{
    ptesync();
    if (pmap == kernel_pmap)
        radix_tlbie_flush_kernel();
    else
        radix_tlbie_flush_user(pmap->pm_pid);
    ttusync();
}

static void
pmap_invalidate_l3e_page(pmap_t pmap, vm_offset_t va, pml3_entry_t l3e)
{

    /*
     * When the PDE has PG_PROMOTED set, the 2MB page mapping was created
     * by a promotion that did not invalidate the 512 4KB page mappings
     * that might exist in the TLB.  Consequently, at this point, the TLB
     * may hold both 4KB and 2MB page mappings for the address range
     * [va, va + L3_PAGE_SIZE).  Therefore, the entire range must be
     * invalidated here.  In contrast, when PG_PROMOTED is clear, the TLB
     * will not hold any 4KB page mappings for the address range
     * [va, va + L3_PAGE_SIZE), and so a single invalidation of the 2MB
     * page mapping suffices.
     */
    ptesync();
    if ((l3e & PG_PROMOTED) != 0)
        pmap_invalidate_range(pmap, va, va + L3_PAGE_SIZE - 1);
    else
        pmap_invalidate_page_2m(pmap, va);

    pmap_invalidate_pwc(pmap);
}

static __inline struct pv_chunk *
pv_to_chunk(pv_entry_t pv)
{

    return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
}

#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)

#define PC_FREE0        0xfffffffffffffffful
#define PC_FREE1        0x3ffffffffffffffful

static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1 };

/*
 * Ensure that the number of spare PV entries in the specified pmap meets or
 * exceeds the given count, "needed".
 *
 * The given PV list lock may be released.
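 * (reclaim_pv_chunk() may drop it and return with a different page's lock
 * held, which is why callers pass the lock pointer by reference.)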
1181 */ 1182 static void 1183 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp) 1184 { 1185 struct pch new_tail; 1186 struct pv_chunk *pc; 1187 vm_page_t m; 1188 int avail, free; 1189 bool reclaimed; 1190 1191 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1192 KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL")); 1193 1194 /* 1195 * Newly allocated PV chunks must be stored in a private list until 1196 * the required number of PV chunks have been allocated. Otherwise, 1197 * reclaim_pv_chunk() could recycle one of these chunks. In 1198 * contrast, these chunks must be added to the pmap upon allocation. 1199 */ 1200 TAILQ_INIT(&new_tail); 1201 retry: 1202 avail = 0; 1203 TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) { 1204 // if ((cpu_feature2 & CPUID2_POPCNT) == 0) 1205 bit_count((bitstr_t *)pc->pc_map, 0, 1206 sizeof(pc->pc_map) * NBBY, &free); 1207 #if 0 1208 free = popcnt_pc_map_pq(pc->pc_map); 1209 #endif 1210 if (free == 0) 1211 break; 1212 avail += free; 1213 if (avail >= needed) 1214 break; 1215 } 1216 for (reclaimed = false; avail < needed; avail += _NPCPV) { 1217 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | 1218 VM_ALLOC_WIRED); 1219 if (m == NULL) { 1220 m = reclaim_pv_chunk(pmap, lockp); 1221 if (m == NULL) 1222 goto retry; 1223 reclaimed = true; 1224 } 1225 PV_STAT(atomic_add_int(&pc_chunk_count, 1)); 1226 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1)); 1227 dump_add_page(m->phys_addr); 1228 pc = (void *)PHYS_TO_DMAP(m->phys_addr); 1229 pc->pc_pmap = pmap; 1230 pc->pc_map[0] = PC_FREE0; 1231 pc->pc_map[1] = PC_FREE1; 1232 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 1233 TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru); 1234 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV)); 1235 1236 /* 1237 * The reclaim might have freed a chunk from the current pmap. 1238 * If that chunk contained available entries, we need to 1239 * re-count the number of available entries. 1240 */ 1241 if (reclaimed) 1242 goto retry; 1243 } 1244 if (!TAILQ_EMPTY(&new_tail)) { 1245 mtx_lock(&pv_chunks_mutex); 1246 TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru); 1247 mtx_unlock(&pv_chunks_mutex); 1248 } 1249 } 1250 1251 /* 1252 * First find and then remove the pv entry for the specified pmap and virtual 1253 * address from the specified pv list. Returns the pv entry if found and NULL 1254 * otherwise. This operation can be performed on pv lists for either 4KB or 1255 * 2MB page mappings. 1256 */ 1257 static __inline pv_entry_t 1258 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 1259 { 1260 pv_entry_t pv; 1261 1262 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) { 1263 #ifdef INVARIANTS 1264 if (PV_PMAP(pv) == NULL) { 1265 printf("corrupted pv_chunk/pv %p\n", pv); 1266 printf("pv_chunk: %64D\n", pv_to_chunk(pv), ":"); 1267 } 1268 MPASS(PV_PMAP(pv) != NULL); 1269 MPASS(pv->pv_va != 0); 1270 #endif 1271 if (pmap == PV_PMAP(pv) && va == pv->pv_va) { 1272 TAILQ_REMOVE(&pvh->pv_list, pv, pv_link); 1273 pvh->pv_gen++; 1274 break; 1275 } 1276 } 1277 return (pv); 1278 } 1279 1280 /* 1281 * After demotion from a 2MB page mapping to 512 4KB page mappings, 1282 * destroy the pv entry for the 2MB page mapping and reinstantiate the pv 1283 * entries for each of the 4KB page mappings. 
1284 */ 1285 static void 1286 pmap_pv_demote_l3e(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, 1287 struct rwlock **lockp) 1288 { 1289 struct md_page *pvh; 1290 struct pv_chunk *pc; 1291 pv_entry_t pv; 1292 vm_offset_t va_last; 1293 vm_page_t m; 1294 int bit, field; 1295 1296 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1297 KASSERT((pa & L3_PAGE_MASK) == 0, 1298 ("pmap_pv_demote_pde: pa is not 2mpage aligned")); 1299 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa); 1300 1301 /* 1302 * Transfer the 2mpage's pv entry for this mapping to the first 1303 * page's pv list. Once this transfer begins, the pv list lock 1304 * must not be released until the last pv entry is reinstantiated. 1305 */ 1306 pvh = pa_to_pvh(pa); 1307 va = trunc_2mpage(va); 1308 pv = pmap_pvh_remove(pvh, pmap, va); 1309 KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found")); 1310 m = PHYS_TO_VM_PAGE(pa); 1311 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link); 1312 1313 m->md.pv_gen++; 1314 /* Instantiate the remaining NPTEPG - 1 pv entries. */ 1315 PV_STAT(atomic_add_long(&pv_entry_allocs, NPTEPG - 1)); 1316 va_last = va + L3_PAGE_SIZE - PAGE_SIZE; 1317 for (;;) { 1318 pc = TAILQ_FIRST(&pmap->pm_pvchunk); 1319 KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0 1320 , ("pmap_pv_demote_pde: missing spare")); 1321 for (field = 0; field < _NPCM; field++) { 1322 while (pc->pc_map[field]) { 1323 bit = cnttzd(pc->pc_map[field]); 1324 pc->pc_map[field] &= ~(1ul << bit); 1325 pv = &pc->pc_pventry[field * 64 + bit]; 1326 va += PAGE_SIZE; 1327 pv->pv_va = va; 1328 m++; 1329 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1330 ("pmap_pv_demote_pde: page %p is not managed", m)); 1331 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link); 1332 1333 m->md.pv_gen++; 1334 if (va == va_last) 1335 goto out; 1336 } 1337 } 1338 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 1339 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); 1340 } 1341 out: 1342 if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0) { 1343 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 1344 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); 1345 } 1346 PV_STAT(atomic_add_long(&pv_entry_count, NPTEPG - 1)); 1347 PV_STAT(atomic_subtract_int(&pv_entry_spare, NPTEPG - 1)); 1348 } 1349 1350 static void 1351 reclaim_pv_chunk_leave_pmap(pmap_t pmap, pmap_t locked_pmap) 1352 { 1353 1354 if (pmap == NULL) 1355 return; 1356 pmap_invalidate_all(pmap); 1357 if (pmap != locked_pmap) 1358 PMAP_UNLOCK(pmap); 1359 } 1360 1361 /* 1362 * We are in a serious low memory condition. Resort to 1363 * drastic measures to free some pages so we can allocate 1364 * another pv entry chunk. 1365 * 1366 * Returns NULL if PV entries were reclaimed from the specified pmap. 1367 * 1368 * We do not, however, unmap 2mpages because subsequent accesses will 1369 * allocate per-page pv entries until repromotion occurs, thereby 1370 * exacerbating the shortage of free pv entries. 
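 * The pmap that owns a scanned chunk is locked while its mappings are torn
 * down; reclaim_pv_chunk_leave_pmap() unlocks it again unless it is the
 * caller's locked_pmap.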
1371 */ 1372 static int active_reclaims = 0; 1373 static vm_page_t 1374 reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp) 1375 { 1376 struct pv_chunk *pc, *pc_marker, *pc_marker_end; 1377 struct pv_chunk_header pc_marker_b, pc_marker_end_b; 1378 struct md_page *pvh; 1379 pml3_entry_t *l3e; 1380 pmap_t next_pmap, pmap; 1381 pt_entry_t *pte, tpte; 1382 pv_entry_t pv; 1383 vm_offset_t va; 1384 vm_page_t m, m_pc; 1385 struct spglist free; 1386 uint64_t inuse; 1387 int bit, field, freed; 1388 1389 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); 1390 KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL")); 1391 pmap = NULL; 1392 m_pc = NULL; 1393 SLIST_INIT(&free); 1394 bzero(&pc_marker_b, sizeof(pc_marker_b)); 1395 bzero(&pc_marker_end_b, sizeof(pc_marker_end_b)); 1396 pc_marker = (struct pv_chunk *)&pc_marker_b; 1397 pc_marker_end = (struct pv_chunk *)&pc_marker_end_b; 1398 1399 mtx_lock(&pv_chunks_mutex); 1400 active_reclaims++; 1401 TAILQ_INSERT_HEAD(&pv_chunks, pc_marker, pc_lru); 1402 TAILQ_INSERT_TAIL(&pv_chunks, pc_marker_end, pc_lru); 1403 while ((pc = TAILQ_NEXT(pc_marker, pc_lru)) != pc_marker_end && 1404 SLIST_EMPTY(&free)) { 1405 next_pmap = pc->pc_pmap; 1406 if (next_pmap == NULL) { 1407 /* 1408 * The next chunk is a marker. However, it is 1409 * not our marker, so active_reclaims must be 1410 * > 1. Consequently, the next_chunk code 1411 * will not rotate the pv_chunks list. 1412 */ 1413 goto next_chunk; 1414 } 1415 mtx_unlock(&pv_chunks_mutex); 1416 1417 /* 1418 * A pv_chunk can only be removed from the pc_lru list 1419 * when both pc_chunks_mutex is owned and the 1420 * corresponding pmap is locked. 1421 */ 1422 if (pmap != next_pmap) { 1423 reclaim_pv_chunk_leave_pmap(pmap, locked_pmap); 1424 pmap = next_pmap; 1425 /* Avoid deadlock and lock recursion. */ 1426 if (pmap > locked_pmap) { 1427 RELEASE_PV_LIST_LOCK(lockp); 1428 PMAP_LOCK(pmap); 1429 mtx_lock(&pv_chunks_mutex); 1430 continue; 1431 } else if (pmap != locked_pmap) { 1432 if (PMAP_TRYLOCK(pmap)) { 1433 mtx_lock(&pv_chunks_mutex); 1434 continue; 1435 } else { 1436 pmap = NULL; /* pmap is not locked */ 1437 mtx_lock(&pv_chunks_mutex); 1438 pc = TAILQ_NEXT(pc_marker, pc_lru); 1439 if (pc == NULL || 1440 pc->pc_pmap != next_pmap) 1441 continue; 1442 goto next_chunk; 1443 } 1444 } 1445 } 1446 1447 /* 1448 * Destroy every non-wired, 4 KB page mapping in the chunk. 
1449 */ 1450 freed = 0; 1451 for (field = 0; field < _NPCM; field++) { 1452 for (inuse = ~pc->pc_map[field] & pc_freemask[field]; 1453 inuse != 0; inuse &= ~(1UL << bit)) { 1454 bit = cnttzd(inuse); 1455 pv = &pc->pc_pventry[field * 64 + bit]; 1456 va = pv->pv_va; 1457 l3e = pmap_pml3e(pmap, va); 1458 if ((be64toh(*l3e) & RPTE_LEAF) != 0) 1459 continue; 1460 pte = pmap_l3e_to_pte(l3e, va); 1461 if ((be64toh(*pte) & PG_W) != 0) 1462 continue; 1463 tpte = be64toh(pte_load_clear(pte)); 1464 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME); 1465 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 1466 vm_page_dirty(m); 1467 if ((tpte & PG_A) != 0) 1468 vm_page_aflag_set(m, PGA_REFERENCED); 1469 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); 1470 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link); 1471 1472 m->md.pv_gen++; 1473 if (TAILQ_EMPTY(&m->md.pv_list) && 1474 (m->flags & PG_FICTITIOUS) == 0) { 1475 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 1476 if (TAILQ_EMPTY(&pvh->pv_list)) { 1477 vm_page_aflag_clear(m, 1478 PGA_WRITEABLE); 1479 } 1480 } 1481 pc->pc_map[field] |= 1UL << bit; 1482 pmap_unuse_pt(pmap, va, be64toh(*l3e), &free); 1483 freed++; 1484 } 1485 } 1486 if (freed == 0) { 1487 mtx_lock(&pv_chunks_mutex); 1488 goto next_chunk; 1489 } 1490 /* Every freed mapping is for a 4 KB page. */ 1491 pmap_resident_count_dec(pmap, freed); 1492 PV_STAT(atomic_add_long(&pv_entry_frees, freed)); 1493 PV_STAT(atomic_add_int(&pv_entry_spare, freed)); 1494 PV_STAT(atomic_subtract_long(&pv_entry_count, freed)); 1495 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 1496 if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1) { 1497 PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV)); 1498 PV_STAT(atomic_subtract_int(&pc_chunk_count, 1)); 1499 PV_STAT(atomic_add_int(&pc_chunk_frees, 1)); 1500 /* Entire chunk is free; return it. */ 1501 m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc)); 1502 dump_drop_page(m_pc->phys_addr); 1503 mtx_lock(&pv_chunks_mutex); 1504 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 1505 break; 1506 } 1507 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 1508 mtx_lock(&pv_chunks_mutex); 1509 /* One freed pv entry in locked_pmap is sufficient. */ 1510 if (pmap == locked_pmap) 1511 break; 1512 next_chunk: 1513 TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru); 1514 TAILQ_INSERT_AFTER(&pv_chunks, pc, pc_marker, pc_lru); 1515 if (active_reclaims == 1 && pmap != NULL) { 1516 /* 1517 * Rotate the pv chunks list so that we do not 1518 * scan the same pv chunks that could not be 1519 * freed (because they contained a wired 1520 * and/or superpage mapping) on every 1521 * invocation of reclaim_pv_chunk(). 1522 */ 1523 while ((pc = TAILQ_FIRST(&pv_chunks)) != pc_marker) { 1524 MPASS(pc->pc_pmap != NULL); 1525 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 1526 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); 1527 } 1528 } 1529 } 1530 TAILQ_REMOVE(&pv_chunks, pc_marker, pc_lru); 1531 TAILQ_REMOVE(&pv_chunks, pc_marker_end, pc_lru); 1532 active_reclaims--; 1533 mtx_unlock(&pv_chunks_mutex); 1534 reclaim_pv_chunk_leave_pmap(pmap, locked_pmap); 1535 if (m_pc == NULL && !SLIST_EMPTY(&free)) { 1536 m_pc = SLIST_FIRST(&free); 1537 SLIST_REMOVE_HEAD(&free, plinks.s.ss); 1538 /* Recycle a freed page table page. 
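 * Its ref_count is reset below so the caller can reuse it as a fresh pv
 * chunk page.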
*/ 1539 m_pc->ref_count = 1; 1540 } 1541 vm_page_free_pages_toq(&free, true); 1542 return (m_pc); 1543 } 1544 1545 /* 1546 * free the pv_entry back to the free list 1547 */ 1548 static void 1549 free_pv_entry(pmap_t pmap, pv_entry_t pv) 1550 { 1551 struct pv_chunk *pc; 1552 int idx, field, bit; 1553 1554 #ifdef VERBOSE_PV 1555 if (pmap != kernel_pmap) 1556 printf("%s(%p, %p)\n", __func__, pmap, pv); 1557 #endif 1558 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1559 PV_STAT(atomic_add_long(&pv_entry_frees, 1)); 1560 PV_STAT(atomic_add_int(&pv_entry_spare, 1)); 1561 PV_STAT(atomic_subtract_long(&pv_entry_count, 1)); 1562 pc = pv_to_chunk(pv); 1563 idx = pv - &pc->pc_pventry[0]; 1564 field = idx / 64; 1565 bit = idx % 64; 1566 pc->pc_map[field] |= 1ul << bit; 1567 if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1) { 1568 /* 98% of the time, pc is already at the head of the list. */ 1569 if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) { 1570 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 1571 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 1572 } 1573 return; 1574 } 1575 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 1576 free_pv_chunk(pc); 1577 } 1578 1579 static void 1580 free_pv_chunk(struct pv_chunk *pc) 1581 { 1582 vm_page_t m; 1583 1584 mtx_lock(&pv_chunks_mutex); 1585 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 1586 mtx_unlock(&pv_chunks_mutex); 1587 PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV)); 1588 PV_STAT(atomic_subtract_int(&pc_chunk_count, 1)); 1589 PV_STAT(atomic_add_int(&pc_chunk_frees, 1)); 1590 /* entire chunk is free, return it */ 1591 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc)); 1592 dump_drop_page(m->phys_addr); 1593 vm_page_unwire_noq(m); 1594 vm_page_free(m); 1595 } 1596 1597 /* 1598 * Returns a new PV entry, allocating a new PV chunk from the system when 1599 * needed. If this PV chunk allocation fails and a PV list lock pointer was 1600 * given, a PV chunk is reclaimed from an arbitrary pmap. Otherwise, NULL is 1601 * returned. 1602 * 1603 * The given PV list lock may be released. 
1604 */ 1605 static pv_entry_t 1606 get_pv_entry(pmap_t pmap, struct rwlock **lockp) 1607 { 1608 int bit, field; 1609 pv_entry_t pv; 1610 struct pv_chunk *pc; 1611 vm_page_t m; 1612 1613 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1614 PV_STAT(atomic_add_long(&pv_entry_allocs, 1)); 1615 retry: 1616 pc = TAILQ_FIRST(&pmap->pm_pvchunk); 1617 if (pc != NULL) { 1618 for (field = 0; field < _NPCM; field++) { 1619 if (pc->pc_map[field]) { 1620 bit = cnttzd(pc->pc_map[field]); 1621 break; 1622 } 1623 } 1624 if (field < _NPCM) { 1625 pv = &pc->pc_pventry[field * 64 + bit]; 1626 pc->pc_map[field] &= ~(1ul << bit); 1627 /* If this was the last item, move it to tail */ 1628 if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0) { 1629 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 1630 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, 1631 pc_list); 1632 } 1633 PV_STAT(atomic_add_long(&pv_entry_count, 1)); 1634 PV_STAT(atomic_subtract_int(&pv_entry_spare, 1)); 1635 MPASS(PV_PMAP(pv) != NULL); 1636 return (pv); 1637 } 1638 } 1639 /* No free items, allocate another chunk */ 1640 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | 1641 VM_ALLOC_WIRED); 1642 if (m == NULL) { 1643 if (lockp == NULL) { 1644 PV_STAT(pc_chunk_tryfail++); 1645 return (NULL); 1646 } 1647 m = reclaim_pv_chunk(pmap, lockp); 1648 if (m == NULL) 1649 goto retry; 1650 } 1651 PV_STAT(atomic_add_int(&pc_chunk_count, 1)); 1652 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1)); 1653 dump_add_page(m->phys_addr); 1654 pc = (void *)PHYS_TO_DMAP(m->phys_addr); 1655 pc->pc_pmap = pmap; 1656 pc->pc_map[0] = PC_FREE0 & ~1ul; /* preallocated bit 0 */ 1657 pc->pc_map[1] = PC_FREE1; 1658 mtx_lock(&pv_chunks_mutex); 1659 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); 1660 mtx_unlock(&pv_chunks_mutex); 1661 pv = &pc->pc_pventry[0]; 1662 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 1663 PV_STAT(atomic_add_long(&pv_entry_count, 1)); 1664 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1)); 1665 MPASS(PV_PMAP(pv) != NULL); 1666 return (pv); 1667 } 1668 1669 #if VM_NRESERVLEVEL > 0 1670 /* 1671 * After promotion from 512 4KB page mappings to a single 2MB page mapping, 1672 * replace the many pv entries for the 4KB page mappings by a single pv entry 1673 * for the 2MB page mapping. 1674 */ 1675 static void 1676 pmap_pv_promote_l3e(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, 1677 struct rwlock **lockp) 1678 { 1679 struct md_page *pvh; 1680 pv_entry_t pv; 1681 vm_offset_t va_last; 1682 vm_page_t m; 1683 1684 KASSERT((pa & L3_PAGE_MASK) == 0, 1685 ("pmap_pv_promote_pde: pa is not 2mpage aligned")); 1686 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa); 1687 1688 /* 1689 * Transfer the first page's pv entry for this mapping to the 2mpage's 1690 * pv list. Aside from avoiding the cost of a call to get_pv_entry(), 1691 * a transfer avoids the possibility that get_pv_entry() calls 1692 * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the 1693 * mappings that is being promoted. 1694 */ 1695 m = PHYS_TO_VM_PAGE(pa); 1696 va = trunc_2mpage(va); 1697 pv = pmap_pvh_remove(&m->md, pmap, va); 1698 KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found")); 1699 pvh = pa_to_pvh(pa); 1700 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_link); 1701 pvh->pv_gen++; 1702 /* Free the remaining NPTEPG - 1 pv entries. 
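 * They are superseded by the single pv entry that now describes the 2MB
 * mapping on the corresponding pvh list.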
*/ 1703 va_last = va + L3_PAGE_SIZE - PAGE_SIZE; 1704 do { 1705 m++; 1706 va += PAGE_SIZE; 1707 pmap_pvh_free(&m->md, pmap, va); 1708 } while (va < va_last); 1709 } 1710 #endif /* VM_NRESERVLEVEL > 0 */ 1711 1712 /* 1713 * First find and then destroy the pv entry for the specified pmap and virtual 1714 * address. This operation can be performed on pv lists for either 4KB or 2MB 1715 * page mappings. 1716 */ 1717 static void 1718 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 1719 { 1720 pv_entry_t pv; 1721 1722 pv = pmap_pvh_remove(pvh, pmap, va); 1723 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found")); 1724 free_pv_entry(pmap, pv); 1725 } 1726 1727 /* 1728 * Conditionally create the PV entry for a 4KB page mapping if the required 1729 * memory can be allocated without resorting to reclamation. 1730 */ 1731 static boolean_t 1732 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m, 1733 struct rwlock **lockp) 1734 { 1735 pv_entry_t pv; 1736 1737 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1738 /* Pass NULL instead of the lock pointer to disable reclamation. */ 1739 if ((pv = get_pv_entry(pmap, NULL)) != NULL) { 1740 pv->pv_va = va; 1741 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); 1742 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link); 1743 m->md.pv_gen++; 1744 return (TRUE); 1745 } else 1746 return (FALSE); 1747 } 1748 1749 vm_paddr_t phys_avail_debug[2 * VM_PHYSSEG_MAX]; 1750 #ifdef INVARIANTS 1751 static void 1752 validate_addr(vm_paddr_t addr, vm_size_t size) 1753 { 1754 vm_paddr_t end = addr + size; 1755 bool found = false; 1756 1757 for (int i = 0; i < 2 * phys_avail_count; i += 2) { 1758 if (addr >= phys_avail_debug[i] && 1759 end <= phys_avail_debug[i + 1]) { 1760 found = true; 1761 break; 1762 } 1763 } 1764 KASSERT(found, ("%#lx-%#lx outside of initial phys_avail array", 1765 addr, end)); 1766 } 1767 #else 1768 static void validate_addr(vm_paddr_t addr, vm_size_t size) {} 1769 #endif 1770 #define DMAP_PAGE_BITS (RPTE_VALID | RPTE_LEAF | RPTE_EAA_MASK | PG_M | PG_A) 1771 1772 static vm_paddr_t 1773 alloc_pt_page(void) 1774 { 1775 vm_paddr_t page; 1776 1777 page = allocpages(1); 1778 pagezero(PHYS_TO_DMAP(page)); 1779 return (page); 1780 } 1781 1782 static void 1783 mmu_radix_dmap_range(vm_paddr_t start, vm_paddr_t end) 1784 { 1785 pt_entry_t *pte, pteval; 1786 vm_paddr_t page; 1787 1788 if (bootverbose) 1789 printf("%s %lx -> %lx\n", __func__, start, end); 1790 while (start < end) { 1791 pteval = start | DMAP_PAGE_BITS; 1792 pte = pmap_pml1e(kernel_pmap, PHYS_TO_DMAP(start)); 1793 if ((be64toh(*pte) & RPTE_VALID) == 0) { 1794 page = alloc_pt_page(); 1795 pde_store(pte, page); 1796 } 1797 pte = pmap_l1e_to_l2e(pte, PHYS_TO_DMAP(start)); 1798 if ((start & L2_PAGE_MASK) == 0 && 1799 end - start >= L2_PAGE_SIZE) { 1800 start += L2_PAGE_SIZE; 1801 goto done; 1802 } else if ((be64toh(*pte) & RPTE_VALID) == 0) { 1803 page = alloc_pt_page(); 1804 pde_store(pte, page); 1805 } 1806 1807 pte = pmap_l2e_to_l3e(pte, PHYS_TO_DMAP(start)); 1808 if ((start & L3_PAGE_MASK) == 0 && 1809 end - start >= L3_PAGE_SIZE) { 1810 start += L3_PAGE_SIZE; 1811 goto done; 1812 } else if ((be64toh(*pte) & RPTE_VALID) == 0) { 1813 page = alloc_pt_page(); 1814 pde_store(pte, page); 1815 } 1816 pte = pmap_l3e_to_pte(pte, PHYS_TO_DMAP(start)); 1817 start += PAGE_SIZE; 1818 done: 1819 pte_store(pte, pteval); 1820 } 1821 } 1822 1823 static void 1824 mmu_radix_dmap_populate(vm_size_t hwphyssz) 1825 { 1826 vm_paddr_t start, end; 1827 1828 for (int i = 0; i < pregions_sz; i++) { 1829 start = 
pregions[i].mr_start; 1830 end = start + pregions[i].mr_size; 1831 if (hwphyssz && start >= hwphyssz) 1832 break; 1833 if (hwphyssz && hwphyssz < end) 1834 end = hwphyssz; 1835 mmu_radix_dmap_range(start, end); 1836 } 1837 } 1838 1839 static void 1840 mmu_radix_setup_pagetables(vm_size_t hwphyssz) 1841 { 1842 vm_paddr_t ptpages, pages; 1843 pt_entry_t *pte; 1844 vm_paddr_t l1phys; 1845 1846 bzero(kernel_pmap, sizeof(struct pmap)); 1847 PMAP_LOCK_INIT(kernel_pmap); 1848 1849 ptpages = allocpages(3); 1850 l1phys = moea64_bootstrap_alloc(RADIX_PGD_SIZE, RADIX_PGD_SIZE); 1851 validate_addr(l1phys, RADIX_PGD_SIZE); 1852 if (bootverbose) 1853 printf("l1phys=%lx\n", l1phys); 1854 MPASS((l1phys & (RADIX_PGD_SIZE-1)) == 0); 1855 for (int i = 0; i < RADIX_PGD_SIZE/PAGE_SIZE; i++) 1856 pagezero(PHYS_TO_DMAP(l1phys + i * PAGE_SIZE)); 1857 kernel_pmap->pm_pml1 = (pml1_entry_t *)PHYS_TO_DMAP(l1phys); 1858 1859 mmu_radix_dmap_populate(hwphyssz); 1860 1861 /* 1862 * Create page tables for first 128MB of KVA 1863 */ 1864 pages = ptpages; 1865 pte = pmap_pml1e(kernel_pmap, VM_MIN_KERNEL_ADDRESS); 1866 *pte = htobe64(pages | RPTE_VALID | RPTE_SHIFT); 1867 pages += PAGE_SIZE; 1868 pte = pmap_l1e_to_l2e(pte, VM_MIN_KERNEL_ADDRESS); 1869 *pte = htobe64(pages | RPTE_VALID | RPTE_SHIFT); 1870 pages += PAGE_SIZE; 1871 pte = pmap_l2e_to_l3e(pte, VM_MIN_KERNEL_ADDRESS); 1872 /* 1873 * the kernel page table pages need to be preserved in 1874 * phys_avail and not overlap with previous allocations 1875 */ 1876 pages = allocpages(nkpt); 1877 if (bootverbose) { 1878 printf("phys_avail after dmap populate and nkpt allocation\n"); 1879 for (int j = 0; j < 2 * phys_avail_count; j+=2) 1880 printf("phys_avail[%d]=%08lx - phys_avail[%d]=%08lx\n", 1881 j, phys_avail[j], j + 1, phys_avail[j + 1]); 1882 } 1883 KPTphys = pages; 1884 for (int i = 0; i < nkpt; i++, pte++, pages += PAGE_SIZE) 1885 *pte = htobe64(pages | RPTE_VALID | RPTE_SHIFT); 1886 kernel_vm_end = VM_MIN_KERNEL_ADDRESS + nkpt * L3_PAGE_SIZE; 1887 if (bootverbose) 1888 printf("kernel_pmap pml1 %p\n", kernel_pmap->pm_pml1); 1889 /* 1890 * Add a physical memory segment (vm_phys_seg) corresponding to the 1891 * preallocated kernel page table pages so that vm_page structures 1892 * representing these pages will be created. The vm_page structures 1893 * are required for promotion of the corresponding kernel virtual 1894 * addresses to superpage mappings. 
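* The segment is registered immediately below with vm_phys_add_seg(),
* covering KPTphys through KPTphys + ptoa(nkpt).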
1895 */ 1896 vm_phys_add_seg(KPTphys, KPTphys + ptoa(nkpt)); 1897 } 1898 1899 static void 1900 mmu_radix_early_bootstrap(vm_offset_t start, vm_offset_t end) 1901 { 1902 vm_paddr_t kpstart, kpend; 1903 vm_size_t physsz, hwphyssz; 1904 //uint64_t l2virt; 1905 int rm_pavail, proctab_size; 1906 int i, j; 1907 1908 kpstart = start & ~DMAP_BASE_ADDRESS; 1909 kpend = end & ~DMAP_BASE_ADDRESS; 1910 1911 /* Get physical memory regions from firmware */ 1912 mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz); 1913 CTR0(KTR_PMAP, "mmu_radix_early_bootstrap: physical memory"); 1914 1915 if (2 * VM_PHYSSEG_MAX < regions_sz) 1916 panic("mmu_radix_early_bootstrap: phys_avail too small"); 1917 1918 if (bootverbose) 1919 for (int i = 0; i < regions_sz; i++) 1920 printf("regions[%d].mr_start=%lx regions[%d].mr_size=%lx\n", 1921 i, regions[i].mr_start, i, regions[i].mr_size); 1922 /* 1923 * XXX workaround a simulator bug 1924 */ 1925 for (int i = 0; i < regions_sz; i++) 1926 if (regions[i].mr_start & PAGE_MASK) { 1927 regions[i].mr_start += PAGE_MASK; 1928 regions[i].mr_start &= ~PAGE_MASK; 1929 regions[i].mr_size &= ~PAGE_MASK; 1930 } 1931 if (bootverbose) 1932 for (int i = 0; i < pregions_sz; i++) 1933 printf("pregions[%d].mr_start=%lx pregions[%d].mr_size=%lx\n", 1934 i, pregions[i].mr_start, i, pregions[i].mr_size); 1935 1936 phys_avail_count = 0; 1937 physsz = 0; 1938 hwphyssz = 0; 1939 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 1940 for (i = 0, j = 0; i < regions_sz; i++) { 1941 if (bootverbose) 1942 printf("regions[%d].mr_start=%016lx regions[%d].mr_size=%016lx\n", 1943 i, regions[i].mr_start, i, regions[i].mr_size); 1944 1945 if (regions[i].mr_size < PAGE_SIZE) 1946 continue; 1947 1948 if (hwphyssz != 0 && 1949 (physsz + regions[i].mr_size) >= hwphyssz) { 1950 if (physsz < hwphyssz) { 1951 phys_avail[j] = regions[i].mr_start; 1952 phys_avail[j + 1] = regions[i].mr_start + 1953 (hwphyssz - physsz); 1954 physsz = hwphyssz; 1955 phys_avail_count++; 1956 dump_avail[j] = phys_avail[j]; 1957 dump_avail[j + 1] = phys_avail[j + 1]; 1958 } 1959 break; 1960 } 1961 phys_avail[j] = regions[i].mr_start; 1962 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size; 1963 dump_avail[j] = phys_avail[j]; 1964 dump_avail[j + 1] = phys_avail[j + 1]; 1965 1966 phys_avail_count++; 1967 physsz += regions[i].mr_size; 1968 j += 2; 1969 } 1970 1971 /* Check for overlap with the kernel and exception vectors */ 1972 rm_pavail = 0; 1973 for (j = 0; j < 2 * phys_avail_count; j+=2) { 1974 if (phys_avail[j] < EXC_LAST) 1975 phys_avail[j] += EXC_LAST; 1976 1977 if (phys_avail[j] >= kpstart && 1978 phys_avail[j + 1] <= kpend) { 1979 phys_avail[j] = phys_avail[j + 1] = ~0; 1980 rm_pavail++; 1981 continue; 1982 } 1983 1984 if (kpstart >= phys_avail[j] && 1985 kpstart < phys_avail[j + 1]) { 1986 if (kpend < phys_avail[j + 1]) { 1987 phys_avail[2 * phys_avail_count] = 1988 (kpend & ~PAGE_MASK) + PAGE_SIZE; 1989 phys_avail[2 * phys_avail_count + 1] = 1990 phys_avail[j + 1]; 1991 phys_avail_count++; 1992 } 1993 1994 phys_avail[j + 1] = kpstart & ~PAGE_MASK; 1995 } 1996 1997 if (kpend >= phys_avail[j] && 1998 kpend < phys_avail[j + 1]) { 1999 if (kpstart > phys_avail[j]) { 2000 phys_avail[2 * phys_avail_count] = phys_avail[j]; 2001 phys_avail[2 * phys_avail_count + 1] = 2002 kpstart & ~PAGE_MASK; 2003 phys_avail_count++; 2004 } 2005 2006 phys_avail[j] = (kpend & ~PAGE_MASK) + 2007 PAGE_SIZE; 2008 } 2009 } 2010 qsort(phys_avail, 2 * phys_avail_count, sizeof(phys_avail[0]), pa_cmp); 2011 for (i = 0; i < 2 * 
phys_avail_count; i++) 2012 phys_avail_debug[i] = phys_avail[i]; 2013 2014 /* Remove physical available regions marked for removal (~0) */ 2015 if (rm_pavail) { 2016 phys_avail_count -= rm_pavail; 2017 for (i = 2 * phys_avail_count; 2018 i < 2*(phys_avail_count + rm_pavail); i+=2) 2019 phys_avail[i] = phys_avail[i + 1] = 0; 2020 } 2021 if (bootverbose) { 2022 printf("phys_avail ranges after filtering:\n"); 2023 for (j = 0; j < 2 * phys_avail_count; j+=2) 2024 printf("phys_avail[%d]=%08lx - phys_avail[%d]=%08lx\n", 2025 j, phys_avail[j], j + 1, phys_avail[j + 1]); 2026 } 2027 physmem = btoc(physsz); 2028 2029 /* XXX assume we're running non-virtualized and 2030 * we don't support BHYVE 2031 */ 2032 if (isa3_pid_bits == 0) 2033 isa3_pid_bits = 20; 2034 if (powernv_enabled) { 2035 parttab_phys = 2036 moea64_bootstrap_alloc(PARTTAB_SIZE, PARTTAB_SIZE); 2037 validate_addr(parttab_phys, PARTTAB_SIZE); 2038 for (int i = 0; i < PARTTAB_SIZE/PAGE_SIZE; i++) 2039 pagezero(PHYS_TO_DMAP(parttab_phys + i * PAGE_SIZE)); 2040 2041 } 2042 proctab_size = 1UL << PROCTAB_SIZE_SHIFT; 2043 proctab0pa = moea64_bootstrap_alloc(proctab_size, proctab_size); 2044 validate_addr(proctab0pa, proctab_size); 2045 for (int i = 0; i < proctab_size/PAGE_SIZE; i++) 2046 pagezero(PHYS_TO_DMAP(proctab0pa + i * PAGE_SIZE)); 2047 2048 mmu_radix_setup_pagetables(hwphyssz); 2049 } 2050 2051 static void 2052 mmu_radix_late_bootstrap(vm_offset_t start, vm_offset_t end) 2053 { 2054 int i; 2055 vm_paddr_t pa; 2056 void *dpcpu; 2057 vm_offset_t va; 2058 2059 /* 2060 * Set up the Open Firmware pmap and add its mappings if not in real 2061 * mode. 2062 */ 2063 if (bootverbose) 2064 printf("%s enter\n", __func__); 2065 2066 /* 2067 * Calculate the last available physical address, and reserve the 2068 * vm_page_array (upper bound). 2069 */ 2070 Maxmem = 0; 2071 for (i = 0; phys_avail[i + 2] != 0; i += 2) 2072 Maxmem = MAX(Maxmem, powerpc_btop(phys_avail[i + 1])); 2073 2074 /* 2075 * Set the start and end of kva. 2076 */ 2077 virtual_avail = VM_MIN_KERNEL_ADDRESS; 2078 virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS; 2079 2080 /* 2081 * Remap any early IO mappings (console framebuffer, etc.) 2082 */ 2083 bs_remap_earlyboot(); 2084 2085 /* 2086 * Allocate a kernel stack with a guard page for thread0 and map it 2087 * into the kernel page map. 2088 */ 2089 pa = allocpages(kstack_pages); 2090 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 2091 virtual_avail = va + kstack_pages * PAGE_SIZE; 2092 CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va); 2093 thread0.td_kstack = va; 2094 for (i = 0; i < kstack_pages; i++) { 2095 mmu_radix_kenter(va, pa); 2096 pa += PAGE_SIZE; 2097 va += PAGE_SIZE; 2098 } 2099 thread0.td_kstack_pages = kstack_pages; 2100 2101 /* 2102 * Allocate virtual address space for the message buffer. 2103 */ 2104 pa = msgbuf_phys = allocpages((msgbufsize + PAGE_MASK) >> PAGE_SHIFT); 2105 msgbufp = (struct msgbuf *)PHYS_TO_DMAP(pa); 2106 2107 /* 2108 * Allocate virtual address space for the dynamic percpu area. 2109 */ 2110 pa = allocpages(DPCPU_SIZE >> PAGE_SHIFT); 2111 dpcpu = (void *)PHYS_TO_DMAP(pa); 2112 dpcpu_init(dpcpu, curcpu); 2113 2114 crashdumpmap = (caddr_t)virtual_avail; 2115 virtual_avail += MAXDUMPPGS * PAGE_SIZE; 2116 2117 /* 2118 * Reserve some special page table entries/VA space for temporary 2119 * mapping of pages. 
2120 */ 2121 } 2122 2123 static void 2124 mmu_parttab_init(void) 2125 { 2126 uint64_t ptcr; 2127 2128 isa3_parttab = (struct pate *)PHYS_TO_DMAP(parttab_phys); 2129 2130 if (bootverbose) 2131 printf("%s parttab: %p\n", __func__, isa3_parttab); 2132 ptcr = parttab_phys | (PARTTAB_SIZE_SHIFT-12); 2133 if (bootverbose) 2134 printf("setting ptcr %lx\n", ptcr); 2135 mtspr(SPR_PTCR, ptcr); 2136 } 2137 2138 static void 2139 mmu_parttab_update(uint64_t lpid, uint64_t pagetab, uint64_t proctab) 2140 { 2141 uint64_t prev; 2142 2143 if (bootverbose) 2144 printf("%s isa3_parttab %p lpid %lx pagetab %lx proctab %lx\n", __func__, isa3_parttab, 2145 lpid, pagetab, proctab); 2146 prev = be64toh(isa3_parttab[lpid].pagetab); 2147 isa3_parttab[lpid].pagetab = htobe64(pagetab); 2148 isa3_parttab[lpid].proctab = htobe64(proctab); 2149 2150 if (prev & PARTTAB_HR) { 2151 __asm __volatile(PPC_TLBIE_5(%0,%1,2,0,1) : : 2152 "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); 2153 __asm __volatile(PPC_TLBIE_5(%0,%1,2,1,1) : : 2154 "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); 2155 } else { 2156 __asm __volatile(PPC_TLBIE_5(%0,%1,2,0,0) : : 2157 "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); 2158 } 2159 ttusync(); 2160 } 2161 2162 static void 2163 mmu_radix_parttab_init(void) 2164 { 2165 uint64_t pagetab; 2166 2167 mmu_parttab_init(); 2168 pagetab = RTS_SIZE | DMAP_TO_PHYS((vm_offset_t)kernel_pmap->pm_pml1) | \ 2169 RADIX_PGD_INDEX_SHIFT | PARTTAB_HR; 2170 mmu_parttab_update(0, pagetab, 0); 2171 } 2172 2173 static void 2174 mmu_radix_proctab_register(vm_paddr_t proctabpa, uint64_t table_size) 2175 { 2176 uint64_t pagetab, proctab; 2177 2178 pagetab = be64toh(isa3_parttab[0].pagetab); 2179 proctab = proctabpa | table_size | PARTTAB_GR; 2180 mmu_parttab_update(0, pagetab, proctab); 2181 } 2182 2183 static void 2184 mmu_radix_proctab_init(void) 2185 { 2186 2187 isa3_base_pid = 1; 2188 2189 isa3_proctab = (void*)PHYS_TO_DMAP(proctab0pa); 2190 isa3_proctab->proctab0 = 2191 htobe64(RTS_SIZE | DMAP_TO_PHYS((vm_offset_t)kernel_pmap->pm_pml1) | 2192 RADIX_PGD_INDEX_SHIFT); 2193 2194 if (powernv_enabled) { 2195 mmu_radix_proctab_register(proctab0pa, PROCTAB_SIZE_SHIFT - 12); 2196 __asm __volatile("ptesync" : : : "memory"); 2197 __asm __volatile(PPC_TLBIE_5(%0,%1,2,1,1) : : 2198 "r" (TLBIEL_INVAL_SET_LPID), "r" (0)); 2199 __asm __volatile("eieio; tlbsync; ptesync" : : : "memory"); 2200 #ifdef PSERIES 2201 } else { 2202 int64_t rc; 2203 2204 rc = phyp_hcall(H_REGISTER_PROC_TBL, 2205 PROC_TABLE_NEW | PROC_TABLE_RADIX | PROC_TABLE_GTSE, 2206 proctab0pa, 0, PROCTAB_SIZE_SHIFT - 12); 2207 if (rc != H_SUCCESS) 2208 panic("mmu_radix_proctab_init: " 2209 "failed to register process table: rc=%jd", 2210 (intmax_t)rc); 2211 #endif 2212 } 2213 2214 if (bootverbose) 2215 printf("process table %p and kernel radix PDE: %p\n", 2216 isa3_proctab, kernel_pmap->pm_pml1); 2217 mtmsr(mfmsr() | PSL_DR ); 2218 mtmsr(mfmsr() & ~PSL_DR); 2219 kernel_pmap->pm_pid = isa3_base_pid; 2220 isa3_base_pid++; 2221 } 2222 2223 void 2224 mmu_radix_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 2225 int advice) 2226 { 2227 struct rwlock *lock; 2228 pml1_entry_t *l1e; 2229 pml2_entry_t *l2e; 2230 pml3_entry_t oldl3e, *l3e; 2231 pt_entry_t *pte; 2232 vm_offset_t va, va_next; 2233 vm_page_t m; 2234 bool anychanged; 2235 2236 if (advice != MADV_DONTNEED && advice != MADV_FREE) 2237 return; 2238 anychanged = false; 2239 PMAP_LOCK(pmap); 2240 for (; sva < eva; sva = va_next) { 2241 l1e = pmap_pml1e(pmap, sva); 2242 if ((be64toh(*l1e) & PG_V) == 0) { 2243 va_next = (sva + 
L1_PAGE_SIZE) & ~L1_PAGE_MASK; 2244 if (va_next < sva) 2245 va_next = eva; 2246 continue; 2247 } 2248 l2e = pmap_l1e_to_l2e(l1e, sva); 2249 if ((be64toh(*l2e) & PG_V) == 0) { 2250 va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK; 2251 if (va_next < sva) 2252 va_next = eva; 2253 continue; 2254 } 2255 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK; 2256 if (va_next < sva) 2257 va_next = eva; 2258 l3e = pmap_l2e_to_l3e(l2e, sva); 2259 oldl3e = be64toh(*l3e); 2260 if ((oldl3e & PG_V) == 0) 2261 continue; 2262 else if ((oldl3e & RPTE_LEAF) != 0) { 2263 if ((oldl3e & PG_MANAGED) == 0) 2264 continue; 2265 lock = NULL; 2266 if (!pmap_demote_l3e_locked(pmap, l3e, sva, &lock)) { 2267 if (lock != NULL) 2268 rw_wunlock(lock); 2269 2270 /* 2271 * The large page mapping was destroyed. 2272 */ 2273 continue; 2274 } 2275 2276 /* 2277 * Unless the page mappings are wired, remove the 2278 * mapping to a single page so that a subsequent 2279 * access may repromote. Choosing the last page 2280 * within the address range [sva, min(va_next, eva)) 2281 * generally results in more repromotions. Since the 2282 * underlying page table page is fully populated, this 2283 * removal never frees a page table page. 2284 */ 2285 if ((oldl3e & PG_W) == 0) { 2286 va = eva; 2287 if (va > va_next) 2288 va = va_next; 2289 va -= PAGE_SIZE; 2290 KASSERT(va >= sva, 2291 ("mmu_radix_advise: no address gap")); 2292 pte = pmap_l3e_to_pte(l3e, va); 2293 KASSERT((be64toh(*pte) & PG_V) != 0, 2294 ("pmap_advise: invalid PTE")); 2295 pmap_remove_pte(pmap, pte, va, be64toh(*l3e), NULL, 2296 &lock); 2297 anychanged = true; 2298 } 2299 if (lock != NULL) 2300 rw_wunlock(lock); 2301 } 2302 if (va_next > eva) 2303 va_next = eva; 2304 va = va_next; 2305 for (pte = pmap_l3e_to_pte(l3e, sva); sva != va_next; 2306 pte++, sva += PAGE_SIZE) { 2307 MPASS(pte == pmap_pte(pmap, sva)); 2308 2309 if ((be64toh(*pte) & (PG_MANAGED | PG_V)) != (PG_MANAGED | PG_V)) 2310 goto maybe_invlrng; 2311 else if ((be64toh(*pte) & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 2312 if (advice == MADV_DONTNEED) { 2313 /* 2314 * Future calls to pmap_is_modified() 2315 * can be avoided by making the page 2316 * dirty now. 2317 */ 2318 m = PHYS_TO_VM_PAGE(be64toh(*pte) & PG_FRAME); 2319 vm_page_dirty(m); 2320 } 2321 atomic_clear_long(pte, htobe64(PG_M | PG_A)); 2322 } else if ((be64toh(*pte) & PG_A) != 0) 2323 atomic_clear_long(pte, htobe64(PG_A)); 2324 else 2325 goto maybe_invlrng; 2326 anychanged = true; 2327 continue; 2328 maybe_invlrng: 2329 if (va != va_next) { 2330 anychanged = true; 2331 va = va_next; 2332 } 2333 } 2334 if (va != va_next) 2335 anychanged = true; 2336 } 2337 if (anychanged) 2338 pmap_invalidate_all(pmap); 2339 PMAP_UNLOCK(pmap); 2340 } 2341 2342 /* 2343 * Routines used in machine-dependent code 2344 */ 2345 static void 2346 mmu_radix_bootstrap(vm_offset_t start, vm_offset_t end) 2347 { 2348 uint64_t lpcr; 2349 2350 if (bootverbose) 2351 printf("%s\n", __func__); 2352 hw_direct_map = 1; 2353 powernv_enabled = (mfmsr() & PSL_HV) ? 
1 : 0; 2354 mmu_radix_early_bootstrap(start, end); 2355 if (bootverbose) 2356 printf("early bootstrap complete\n"); 2357 if (powernv_enabled) { 2358 lpcr = mfspr(SPR_LPCR); 2359 mtspr(SPR_LPCR, lpcr | LPCR_UPRT | LPCR_HR); 2360 mmu_radix_parttab_init(); 2361 mmu_radix_init_amor(); 2362 if (bootverbose) 2363 printf("powernv init complete\n"); 2364 } 2365 mmu_radix_init_iamr(); 2366 mmu_radix_proctab_init(); 2367 mmu_radix_pid_set(kernel_pmap); 2368 if (powernv_enabled) 2369 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL); 2370 else 2371 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_LPID); 2372 2373 mmu_radix_late_bootstrap(start, end); 2374 numa_mem_regions(&numa_pregions, &numa_pregions_sz); 2375 if (bootverbose) 2376 printf("%s done\n", __func__); 2377 pmap_bootstrapped = 1; 2378 dmaplimit = roundup2(powerpc_ptob(Maxmem), L2_PAGE_SIZE); 2379 PCPU_SET(flags, PCPU_GET(flags) | PC_FLAG_NOSRS); 2380 } 2381 2382 static void 2383 mmu_radix_cpu_bootstrap(int ap) 2384 { 2385 uint64_t lpcr; 2386 uint64_t ptcr; 2387 2388 if (powernv_enabled) { 2389 lpcr = mfspr(SPR_LPCR); 2390 mtspr(SPR_LPCR, lpcr | LPCR_UPRT | LPCR_HR); 2391 2392 ptcr = parttab_phys | (PARTTAB_SIZE_SHIFT-12); 2393 mtspr(SPR_PTCR, ptcr); 2394 mmu_radix_init_amor(); 2395 } 2396 mmu_radix_init_iamr(); 2397 mmu_radix_pid_set(kernel_pmap); 2398 if (powernv_enabled) 2399 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_GLOBAL); 2400 else 2401 mmu_radix_tlbiel_flush(TLB_INVAL_SCOPE_LPID); 2402 } 2403 2404 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l3e, CTLFLAG_RD, 0, 2405 "2MB page mapping counters"); 2406 2407 static COUNTER_U64_DEFINE_EARLY(pmap_l3e_demotions); 2408 SYSCTL_COUNTER_U64(_vm_pmap_l3e, OID_AUTO, demotions, CTLFLAG_RD, 2409 &pmap_l3e_demotions, "2MB page demotions"); 2410 2411 static COUNTER_U64_DEFINE_EARLY(pmap_l3e_mappings); 2412 SYSCTL_COUNTER_U64(_vm_pmap_l3e, OID_AUTO, mappings, CTLFLAG_RD, 2413 &pmap_l3e_mappings, "2MB page mappings"); 2414 2415 static COUNTER_U64_DEFINE_EARLY(pmap_l3e_p_failures); 2416 SYSCTL_COUNTER_U64(_vm_pmap_l3e, OID_AUTO, p_failures, CTLFLAG_RD, 2417 &pmap_l3e_p_failures, "2MB page promotion failures"); 2418 2419 static COUNTER_U64_DEFINE_EARLY(pmap_l3e_promotions); 2420 SYSCTL_COUNTER_U64(_vm_pmap_l3e, OID_AUTO, promotions, CTLFLAG_RD, 2421 &pmap_l3e_promotions, "2MB page promotions"); 2422 2423 static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2e, CTLFLAG_RD, 0, 2424 "1GB page mapping counters"); 2425 2426 static COUNTER_U64_DEFINE_EARLY(pmap_l2e_demotions); 2427 SYSCTL_COUNTER_U64(_vm_pmap_l2e, OID_AUTO, demotions, CTLFLAG_RD, 2428 &pmap_l2e_demotions, "1GB page demotions"); 2429 2430 void 2431 mmu_radix_clear_modify(vm_page_t m) 2432 { 2433 struct md_page *pvh; 2434 pmap_t pmap; 2435 pv_entry_t next_pv, pv; 2436 pml3_entry_t oldl3e, *l3e; 2437 pt_entry_t oldpte, *pte; 2438 struct rwlock *lock; 2439 vm_offset_t va; 2440 int md_gen, pvh_gen; 2441 2442 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2443 ("pmap_clear_modify: page %p is not managed", m)); 2444 vm_page_assert_busied(m); 2445 CTR2(KTR_PMAP, "%s(%p)", __func__, m); 2446 2447 /* 2448 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set. 2449 * If the object containing the page is locked and the page is not 2450 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set. 2451 */ 2452 if ((m->a.flags & PGA_WRITEABLE) == 0) 2453 return; 2454 pvh = (m->flags & PG_FICTITIOUS) != 0 ? 
&pv_dummy : 2455 pa_to_pvh(VM_PAGE_TO_PHYS(m)); 2456 lock = VM_PAGE_TO_PV_LIST_LOCK(m); 2457 rw_wlock(lock); 2458 restart: 2459 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_link, next_pv) { 2460 pmap = PV_PMAP(pv); 2461 if (!PMAP_TRYLOCK(pmap)) { 2462 pvh_gen = pvh->pv_gen; 2463 rw_wunlock(lock); 2464 PMAP_LOCK(pmap); 2465 rw_wlock(lock); 2466 if (pvh_gen != pvh->pv_gen) { 2467 PMAP_UNLOCK(pmap); 2468 goto restart; 2469 } 2470 } 2471 va = pv->pv_va; 2472 l3e = pmap_pml3e(pmap, va); 2473 oldl3e = be64toh(*l3e); 2474 if ((oldl3e & PG_RW) != 0 && 2475 pmap_demote_l3e_locked(pmap, l3e, va, &lock) && 2476 (oldl3e & PG_W) == 0) { 2477 /* 2478 * Write protect the mapping to a 2479 * single page so that a subsequent 2480 * write access may repromote. 2481 */ 2482 va += VM_PAGE_TO_PHYS(m) - (oldl3e & 2483 PG_PS_FRAME); 2484 pte = pmap_l3e_to_pte(l3e, va); 2485 oldpte = be64toh(*pte); 2486 while (!atomic_cmpset_long(pte, 2487 htobe64(oldpte), 2488 htobe64((oldpte | RPTE_EAA_R) & ~(PG_M | PG_RW)))) 2489 oldpte = be64toh(*pte); 2490 vm_page_dirty(m); 2491 pmap_invalidate_page(pmap, va); 2492 } 2493 PMAP_UNLOCK(pmap); 2494 } 2495 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2496 pmap = PV_PMAP(pv); 2497 if (!PMAP_TRYLOCK(pmap)) { 2498 md_gen = m->md.pv_gen; 2499 pvh_gen = pvh->pv_gen; 2500 rw_wunlock(lock); 2501 PMAP_LOCK(pmap); 2502 rw_wlock(lock); 2503 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) { 2504 PMAP_UNLOCK(pmap); 2505 goto restart; 2506 } 2507 } 2508 l3e = pmap_pml3e(pmap, pv->pv_va); 2509 KASSERT((be64toh(*l3e) & RPTE_LEAF) == 0, ("pmap_clear_modify: found" 2510 " a 2mpage in page %p's pv list", m)); 2511 pte = pmap_l3e_to_pte(l3e, pv->pv_va); 2512 if ((be64toh(*pte) & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 2513 atomic_clear_long(pte, htobe64(PG_M)); 2514 pmap_invalidate_page(pmap, pv->pv_va); 2515 } 2516 PMAP_UNLOCK(pmap); 2517 } 2518 rw_wunlock(lock); 2519 } 2520 2521 void 2522 mmu_radix_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 2523 vm_size_t len, vm_offset_t src_addr) 2524 { 2525 struct rwlock *lock; 2526 struct spglist free; 2527 vm_offset_t addr; 2528 vm_offset_t end_addr = src_addr + len; 2529 vm_offset_t va_next; 2530 vm_page_t dst_pdpg, dstmpte, srcmpte; 2531 bool invalidate_all; 2532 2533 CTR6(KTR_PMAP, 2534 "%s(dst_pmap=%p, src_pmap=%p, dst_addr=%lx, len=%lu, src_addr=%lx)\n", 2535 __func__, dst_pmap, src_pmap, dst_addr, len, src_addr); 2536 2537 if (dst_addr != src_addr) 2538 return; 2539 lock = NULL; 2540 invalidate_all = false; 2541 if (dst_pmap < src_pmap) { 2542 PMAP_LOCK(dst_pmap); 2543 PMAP_LOCK(src_pmap); 2544 } else { 2545 PMAP_LOCK(src_pmap); 2546 PMAP_LOCK(dst_pmap); 2547 } 2548 2549 for (addr = src_addr; addr < end_addr; addr = va_next) { 2550 pml1_entry_t *l1e; 2551 pml2_entry_t *l2e; 2552 pml3_entry_t srcptepaddr, *l3e; 2553 pt_entry_t *src_pte, *dst_pte; 2554 2555 l1e = pmap_pml1e(src_pmap, addr); 2556 if ((be64toh(*l1e) & PG_V) == 0) { 2557 va_next = (addr + L1_PAGE_SIZE) & ~L1_PAGE_MASK; 2558 if (va_next < addr) 2559 va_next = end_addr; 2560 continue; 2561 } 2562 2563 l2e = pmap_l1e_to_l2e(l1e, addr); 2564 if ((be64toh(*l2e) & PG_V) == 0) { 2565 va_next = (addr + L2_PAGE_SIZE) & ~L2_PAGE_MASK; 2566 if (va_next < addr) 2567 va_next = end_addr; 2568 continue; 2569 } 2570 2571 va_next = (addr + L3_PAGE_SIZE) & ~L3_PAGE_MASK; 2572 if (va_next < addr) 2573 va_next = end_addr; 2574 2575 l3e = pmap_l2e_to_l3e(l2e, addr); 2576 srcptepaddr = be64toh(*l3e); 2577 if (srcptepaddr == 0) 2578 continue; 2579 2580 if (srcptepaddr & RPTE_LEAF) { 2581 if 
((addr & L3_PAGE_MASK) != 0 || 2582 addr + L3_PAGE_SIZE > end_addr) 2583 continue; 2584 dst_pdpg = pmap_allocl3e(dst_pmap, addr, NULL); 2585 if (dst_pdpg == NULL) 2586 break; 2587 l3e = (pml3_entry_t *) 2588 PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dst_pdpg)); 2589 l3e = &l3e[pmap_pml3e_index(addr)]; 2590 if (be64toh(*l3e) == 0 && ((srcptepaddr & PG_MANAGED) == 0 || 2591 pmap_pv_insert_l3e(dst_pmap, addr, srcptepaddr, 2592 PMAP_ENTER_NORECLAIM, &lock))) { 2593 *l3e = htobe64(srcptepaddr & ~PG_W); 2594 pmap_resident_count_inc(dst_pmap, 2595 L3_PAGE_SIZE / PAGE_SIZE); 2596 counter_u64_add(pmap_l3e_mappings, 1); 2597 } else 2598 dst_pdpg->ref_count--; 2599 continue; 2600 } 2601 2602 srcptepaddr &= PG_FRAME; 2603 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr); 2604 KASSERT(srcmpte->ref_count > 0, 2605 ("pmap_copy: source page table page is unused")); 2606 2607 if (va_next > end_addr) 2608 va_next = end_addr; 2609 2610 src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr); 2611 src_pte = &src_pte[pmap_pte_index(addr)]; 2612 dstmpte = NULL; 2613 while (addr < va_next) { 2614 pt_entry_t ptetemp; 2615 ptetemp = be64toh(*src_pte); 2616 /* 2617 * we only virtual copy managed pages 2618 */ 2619 if ((ptetemp & PG_MANAGED) != 0) { 2620 if (dstmpte != NULL && 2621 dstmpte->pindex == pmap_l3e_pindex(addr)) 2622 dstmpte->ref_count++; 2623 else if ((dstmpte = pmap_allocpte(dst_pmap, 2624 addr, NULL)) == NULL) 2625 goto out; 2626 dst_pte = (pt_entry_t *) 2627 PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte)); 2628 dst_pte = &dst_pte[pmap_pte_index(addr)]; 2629 if (be64toh(*dst_pte) == 0 && 2630 pmap_try_insert_pv_entry(dst_pmap, addr, 2631 PHYS_TO_VM_PAGE(ptetemp & PG_FRAME), 2632 &lock)) { 2633 /* 2634 * Clear the wired, modified, and 2635 * accessed (referenced) bits 2636 * during the copy. 2637 */ 2638 *dst_pte = htobe64(ptetemp & ~(PG_W | PG_M | 2639 PG_A)); 2640 pmap_resident_count_inc(dst_pmap, 1); 2641 } else { 2642 SLIST_INIT(&free); 2643 if (pmap_unwire_ptp(dst_pmap, addr, 2644 dstmpte, &free)) { 2645 /* 2646 * Although "addr" is not 2647 * mapped, paging-structure 2648 * caches could nonetheless 2649 * have entries that refer to 2650 * the freed page table pages. 2651 * Invalidate those entries. 
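* Rather than flushing immediately, the need is recorded in
* invalidate_all and a single pmap_invalidate_all() is issued at the
* "out" label, batching this flush with any others from this copy.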
2652 */ 2653 invalidate_all = true; 2654 vm_page_free_pages_toq(&free, 2655 true); 2656 } 2657 goto out; 2658 } 2659 if (dstmpte->ref_count >= srcmpte->ref_count) 2660 break; 2661 } 2662 addr += PAGE_SIZE; 2663 if (__predict_false((addr & L3_PAGE_MASK) == 0)) 2664 src_pte = pmap_pte(src_pmap, addr); 2665 else 2666 src_pte++; 2667 } 2668 } 2669 out: 2670 if (invalidate_all) 2671 pmap_invalidate_all(dst_pmap); 2672 if (lock != NULL) 2673 rw_wunlock(lock); 2674 PMAP_UNLOCK(src_pmap); 2675 PMAP_UNLOCK(dst_pmap); 2676 } 2677 2678 static void 2679 mmu_radix_copy_page(vm_page_t msrc, vm_page_t mdst) 2680 { 2681 vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc)); 2682 vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst)); 2683 2684 CTR3(KTR_PMAP, "%s(%p, %p)", __func__, src, dst); 2685 /* 2686 * XXX slow 2687 */ 2688 bcopy((void *)src, (void *)dst, PAGE_SIZE); 2689 } 2690 2691 static void 2692 mmu_radix_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[], 2693 vm_offset_t b_offset, int xfersize) 2694 { 2695 void *a_cp, *b_cp; 2696 vm_offset_t a_pg_offset, b_pg_offset; 2697 int cnt; 2698 2699 CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x)", __func__, ma, 2700 a_offset, mb, b_offset, xfersize); 2701 2702 while (xfersize > 0) { 2703 a_pg_offset = a_offset & PAGE_MASK; 2704 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); 2705 a_cp = (char *)(uintptr_t)PHYS_TO_DMAP( 2706 VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT])) + 2707 a_pg_offset; 2708 b_pg_offset = b_offset & PAGE_MASK; 2709 cnt = min(cnt, PAGE_SIZE - b_pg_offset); 2710 b_cp = (char *)(uintptr_t)PHYS_TO_DMAP( 2711 VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT])) + 2712 b_pg_offset; 2713 bcopy(a_cp, b_cp, cnt); 2714 a_offset += cnt; 2715 b_offset += cnt; 2716 xfersize -= cnt; 2717 } 2718 } 2719 2720 #if VM_NRESERVLEVEL > 0 2721 /* 2722 * Tries to promote the 512, contiguous 4KB page mappings that are within a 2723 * single page table page (PTP) to a single 2MB page mapping. For promotion 2724 * to occur, two conditions must be met: (1) the 4KB page mappings must map 2725 * aligned, contiguous physical memory and (2) the 4KB page mappings must have 2726 * identical characteristics. 2727 */ 2728 static int 2729 pmap_promote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va, 2730 struct rwlock **lockp) 2731 { 2732 pml3_entry_t newpde; 2733 pt_entry_t *firstpte, oldpte, pa, *pte; 2734 vm_page_t mpte; 2735 2736 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2737 2738 /* 2739 * Examine the first PTE in the specified PTP. Abort if this PTE is 2740 * either invalid, unused, or does not map the first 4KB physical page 2741 * within a 2MB page. 2742 */ 2743 firstpte = (pt_entry_t *)PHYS_TO_DMAP(be64toh(*pde) & PG_FRAME); 2744 setpde: 2745 newpde = be64toh(*firstpte); 2746 if ((newpde & ((PG_FRAME & L3_PAGE_MASK) | PG_A | PG_V)) != (PG_A | PG_V)) { 2747 CTR2(KTR_PMAP, "pmap_promote_l3e: failure for va %#lx" 2748 " in pmap %p", va, pmap); 2749 goto fail; 2750 } 2751 if ((newpde & (PG_M | PG_RW)) == PG_RW) { 2752 /* 2753 * When PG_M is already clear, PG_RW can be cleared without 2754 * a TLB invalidation. 2755 */ 2756 if (!atomic_cmpset_long(firstpte, htobe64(newpde), htobe64((newpde | RPTE_EAA_R) & ~RPTE_EAA_W))) 2757 goto setpde; 2758 newpde &= ~RPTE_EAA_W; 2759 } 2760 2761 /* 2762 * Examine each of the other PTEs in the specified PTP. Abort if this 2763 * PTE maps an unexpected 4KB physical page or does not have identical 2764 * characteristics to the first PTE. 
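* The loop below walks backwards from the last PTE in the page toward
* the first, with "pa" holding the physical frame (plus the required
* PG_A and PG_V bits) that each successive PTE is expected to contain.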
2765 */ 2766 pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + L3_PAGE_SIZE - PAGE_SIZE; 2767 for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) { 2768 setpte: 2769 oldpte = be64toh(*pte); 2770 if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) { 2771 CTR2(KTR_PMAP, "pmap_promote_l3e: failure for va %#lx" 2772 " in pmap %p", va, pmap); 2773 goto fail; 2774 } 2775 if ((oldpte & (PG_M | PG_RW)) == PG_RW) { 2776 /* 2777 * When PG_M is already clear, PG_RW can be cleared 2778 * without a TLB invalidation. 2779 */ 2780 if (!atomic_cmpset_long(pte, htobe64(oldpte), htobe64((oldpte | RPTE_EAA_R) & ~RPTE_EAA_W))) 2781 goto setpte; 2782 oldpte &= ~RPTE_EAA_W; 2783 CTR2(KTR_PMAP, "pmap_promote_l3e: protect for va %#lx" 2784 " in pmap %p", (oldpte & PG_FRAME & L3_PAGE_MASK) | 2785 (va & ~L3_PAGE_MASK), pmap); 2786 } 2787 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) { 2788 CTR2(KTR_PMAP, "pmap_promote_l3e: failure for va %#lx" 2789 " in pmap %p", va, pmap); 2790 goto fail; 2791 } 2792 pa -= PAGE_SIZE; 2793 } 2794 2795 /* 2796 * Save the page table page in its current state until the PDE 2797 * mapping the superpage is demoted by pmap_demote_pde() or 2798 * destroyed by pmap_remove_pde(). 2799 */ 2800 mpte = PHYS_TO_VM_PAGE(be64toh(*pde) & PG_FRAME); 2801 KASSERT(mpte >= vm_page_array && 2802 mpte < &vm_page_array[vm_page_array_size], 2803 ("pmap_promote_l3e: page table page is out of range")); 2804 KASSERT(mpte->pindex == pmap_l3e_pindex(va), 2805 ("pmap_promote_l3e: page table page's pindex is wrong")); 2806 if (pmap_insert_pt_page(pmap, mpte)) { 2807 CTR2(KTR_PMAP, 2808 "pmap_promote_l3e: failure for va %#lx in pmap %p", va, 2809 pmap); 2810 goto fail; 2811 } 2812 2813 /* 2814 * Promote the pv entries. 2815 */ 2816 if ((newpde & PG_MANAGED) != 0) 2817 pmap_pv_promote_l3e(pmap, va, newpde & PG_PS_FRAME, lockp); 2818 2819 pte_store(pde, PG_PROMOTED | newpde); 2820 ptesync(); 2821 counter_u64_add(pmap_l3e_promotions, 1); 2822 CTR2(KTR_PMAP, "pmap_promote_l3e: success for va %#lx" 2823 " in pmap %p", va, pmap); 2824 return (0); 2825 fail: 2826 counter_u64_add(pmap_l3e_p_failures, 1); 2827 return (KERN_FAILURE); 2828 } 2829 #endif /* VM_NRESERVLEVEL > 0 */ 2830 2831 int 2832 mmu_radix_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, 2833 vm_prot_t prot, u_int flags, int8_t psind) 2834 { 2835 struct rwlock *lock; 2836 pml3_entry_t *l3e; 2837 pt_entry_t *pte; 2838 pt_entry_t newpte, origpte; 2839 pv_entry_t pv; 2840 vm_paddr_t opa, pa; 2841 vm_page_t mpte, om; 2842 int rv, retrycount; 2843 boolean_t nosleep, invalidate_all, invalidate_page; 2844 2845 va = trunc_page(va); 2846 retrycount = 0; 2847 invalidate_page = invalidate_all = false; 2848 CTR6(KTR_PMAP, "pmap_enter(%p, %#lx, %p, %#x, %#x, %d)", pmap, va, 2849 m, prot, flags, psind); 2850 KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig")); 2851 KASSERT((m->oflags & VPO_UNMANAGED) != 0 || !VA_IS_CLEANMAP(va), 2852 ("pmap_enter: managed mapping within the clean submap")); 2853 if ((m->oflags & VPO_UNMANAGED) == 0) 2854 VM_PAGE_OBJECT_BUSY_ASSERT(m); 2855 2856 KASSERT((flags & PMAP_ENTER_RESERVED) == 0, 2857 ("pmap_enter: flags %u has reserved bits set", flags)); 2858 pa = VM_PAGE_TO_PHYS(m); 2859 newpte = (pt_entry_t)(pa | PG_A | PG_V | RPTE_LEAF); 2860 if ((flags & VM_PROT_WRITE) != 0) 2861 newpte |= PG_M; 2862 if ((flags & VM_PROT_READ) != 0) 2863 newpte |= PG_A; 2864 if (prot & VM_PROT_READ) 2865 newpte |= RPTE_EAA_R; 2866 if ((prot & VM_PROT_WRITE) != 0) 2867 newpte |= RPTE_EAA_W; 2868 KASSERT((newpte & (PG_M | PG_RW)) != PG_M, 
2869 ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't")); 2870 2871 if (prot & VM_PROT_EXECUTE) 2872 newpte |= PG_X; 2873 if ((flags & PMAP_ENTER_WIRED) != 0) 2874 newpte |= PG_W; 2875 if (va >= DMAP_MIN_ADDRESS) 2876 newpte |= RPTE_EAA_P; 2877 newpte |= pmap_cache_bits(m->md.mdpg_cache_attrs); 2878 /* 2879 * Set modified bit gratuitously for writeable mappings if 2880 * the page is unmanaged. We do not want to take a fault 2881 * to do the dirty bit accounting for these mappings. 2882 */ 2883 if ((m->oflags & VPO_UNMANAGED) != 0) { 2884 if ((newpte & PG_RW) != 0) 2885 newpte |= PG_M; 2886 } else 2887 newpte |= PG_MANAGED; 2888 2889 lock = NULL; 2890 PMAP_LOCK(pmap); 2891 if (psind == 1) { 2892 /* Assert the required virtual and physical alignment. */ 2893 KASSERT((va & L3_PAGE_MASK) == 0, ("pmap_enter: va unaligned")); 2894 KASSERT(m->psind > 0, ("pmap_enter: m->psind < psind")); 2895 rv = pmap_enter_l3e(pmap, va, newpte | RPTE_LEAF, flags, m, &lock); 2896 goto out; 2897 } 2898 mpte = NULL; 2899 2900 /* 2901 * In the case that a page table page is not 2902 * resident, we are creating it here. 2903 */ 2904 retry: 2905 l3e = pmap_pml3e(pmap, va); 2906 if (l3e != NULL && (be64toh(*l3e) & PG_V) != 0 && ((be64toh(*l3e) & RPTE_LEAF) == 0 || 2907 pmap_demote_l3e_locked(pmap, l3e, va, &lock))) { 2908 pte = pmap_l3e_to_pte(l3e, va); 2909 if (va < VM_MAXUSER_ADDRESS && mpte == NULL) { 2910 mpte = PHYS_TO_VM_PAGE(be64toh(*l3e) & PG_FRAME); 2911 mpte->ref_count++; 2912 } 2913 } else if (va < VM_MAXUSER_ADDRESS) { 2914 /* 2915 * Here if the pte page isn't mapped, or if it has been 2916 * deallocated. 2917 */ 2918 nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0; 2919 mpte = _pmap_allocpte(pmap, pmap_l3e_pindex(va), 2920 nosleep ? NULL : &lock); 2921 if (mpte == NULL && nosleep) { 2922 rv = KERN_RESOURCE_SHORTAGE; 2923 goto out; 2924 } 2925 if (__predict_false(retrycount++ == 6)) 2926 panic("too many retries"); 2927 invalidate_all = true; 2928 goto retry; 2929 } else 2930 panic("pmap_enter: invalid page directory va=%#lx", va); 2931 2932 origpte = be64toh(*pte); 2933 pv = NULL; 2934 2935 /* 2936 * Is the specified virtual address already mapped? 2937 */ 2938 if ((origpte & PG_V) != 0) { 2939 #ifdef INVARIANTS 2940 if (VERBOSE_PMAP || pmap_logging) { 2941 printf("cow fault pmap_enter(%p, %#lx, %p, %#x, %x, %d) --" 2942 " asid=%lu curpid=%d name=%s origpte0x%lx\n", 2943 pmap, va, m, prot, flags, psind, pmap->pm_pid, 2944 curproc->p_pid, curproc->p_comm, origpte); 2945 pmap_pte_walk(pmap->pm_pml1, va); 2946 } 2947 #endif 2948 /* 2949 * Wiring change, just update stats. We don't worry about 2950 * wiring PT pages as they remain resident as long as there 2951 * are valid mappings in them. Hence, if a user page is wired, 2952 * the PT page will be also. 2953 */ 2954 if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0) 2955 pmap->pm_stats.wired_count++; 2956 else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0) 2957 pmap->pm_stats.wired_count--; 2958 2959 /* 2960 * Remove the extra PT page reference. 2961 */ 2962 if (mpte != NULL) { 2963 mpte->ref_count--; 2964 KASSERT(mpte->ref_count > 0, 2965 ("pmap_enter: missing reference to page table page," 2966 " va: 0x%lx", va)); 2967 } 2968 2969 /* 2970 * Has the physical page changed? 2971 */ 2972 opa = origpte & PG_FRAME; 2973 if (opa == pa) { 2974 /* 2975 * No, might be a protection or wiring change. 
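* If the old and new PTEs differ only in the PG_M and PG_A bits, the
* entry is updated with an atomic compare-and-set and the page's dirty
* and referenced flags are propagated; any larger change falls through
* to the "validate" path, which rewrites the PTE in full.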
2976 */ 2977 if ((origpte & PG_MANAGED) != 0 && 2978 (newpte & PG_RW) != 0) 2979 vm_page_aflag_set(m, PGA_WRITEABLE); 2980 if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0) { 2981 if ((newpte & (PG_A|PG_M)) != (origpte & (PG_A|PG_M))) { 2982 if (!atomic_cmpset_long(pte, htobe64(origpte), htobe64(newpte))) 2983 goto retry; 2984 if ((newpte & PG_M) != (origpte & PG_M)) 2985 vm_page_dirty(m); 2986 if ((newpte & PG_A) != (origpte & PG_A)) 2987 vm_page_aflag_set(m, PGA_REFERENCED); 2988 ptesync(); 2989 } else 2990 invalidate_all = true; 2991 if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0) 2992 goto unchanged; 2993 } 2994 goto validate; 2995 } 2996 2997 /* 2998 * The physical page has changed. Temporarily invalidate 2999 * the mapping. This ensures that all threads sharing the 3000 * pmap keep a consistent view of the mapping, which is 3001 * necessary for the correct handling of COW faults. It 3002 * also permits reuse of the old mapping's PV entry, 3003 * avoiding an allocation. 3004 * 3005 * For consistency, handle unmanaged mappings the same way. 3006 */ 3007 origpte = be64toh(pte_load_clear(pte)); 3008 KASSERT((origpte & PG_FRAME) == opa, 3009 ("pmap_enter: unexpected pa update for %#lx", va)); 3010 if ((origpte & PG_MANAGED) != 0) { 3011 om = PHYS_TO_VM_PAGE(opa); 3012 3013 /* 3014 * The pmap lock is sufficient to synchronize with 3015 * concurrent calls to pmap_page_test_mappings() and 3016 * pmap_ts_referenced(). 3017 */ 3018 if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 3019 vm_page_dirty(om); 3020 if ((origpte & PG_A) != 0) 3021 vm_page_aflag_set(om, PGA_REFERENCED); 3022 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa); 3023 pv = pmap_pvh_remove(&om->md, pmap, va); 3024 if ((newpte & PG_MANAGED) == 0) 3025 free_pv_entry(pmap, pv); 3026 #ifdef INVARIANTS 3027 else if (origpte & PG_MANAGED) { 3028 if (pv == NULL) { 3029 pmap_page_print_mappings(om); 3030 MPASS(pv != NULL); 3031 } 3032 } 3033 #endif 3034 if ((om->a.flags & PGA_WRITEABLE) != 0 && 3035 TAILQ_EMPTY(&om->md.pv_list) && 3036 ((om->flags & PG_FICTITIOUS) != 0 || 3037 TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))) 3038 vm_page_aflag_clear(om, PGA_WRITEABLE); 3039 } 3040 if ((origpte & PG_A) != 0) 3041 invalidate_page = true; 3042 origpte = 0; 3043 } else { 3044 if (pmap != kernel_pmap) { 3045 #ifdef INVARIANTS 3046 if (VERBOSE_PMAP || pmap_logging) 3047 printf("pmap_enter(%p, %#lx, %p, %#x, %x, %d) -- asid=%lu curpid=%d name=%s\n", 3048 pmap, va, m, prot, flags, psind, 3049 pmap->pm_pid, curproc->p_pid, 3050 curproc->p_comm); 3051 #endif 3052 } 3053 3054 /* 3055 * Increment the counters. 3056 */ 3057 if ((newpte & PG_W) != 0) 3058 pmap->pm_stats.wired_count++; 3059 pmap_resident_count_inc(pmap, 1); 3060 } 3061 3062 /* 3063 * Enter on the PV list if part of our managed memory. 3064 */ 3065 if ((newpte & PG_MANAGED) != 0) { 3066 if (pv == NULL) { 3067 pv = get_pv_entry(pmap, &lock); 3068 pv->pv_va = va; 3069 } 3070 #ifdef VERBOSE_PV 3071 else 3072 printf("reassigning pv: %p to pmap: %p\n", 3073 pv, pmap); 3074 #endif 3075 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa); 3076 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link); 3077 m->md.pv_gen++; 3078 if ((newpte & PG_RW) != 0) 3079 vm_page_aflag_set(m, PGA_WRITEABLE); 3080 } 3081 3082 /* 3083 * Update the PTE. 
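* Two cases follow: a previously valid mapping is replaced via
* pte_load_store(), with the old PG_M/PG_A state deciding whether a
* TLB invalidation must be scheduled, whereas a brand-new mapping is
* simply written with pte_store() followed by a ptesync.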
3084 */ 3085 if ((origpte & PG_V) != 0) { 3086 validate: 3087 origpte = be64toh(pte_load_store(pte, htobe64(newpte))); 3088 KASSERT((origpte & PG_FRAME) == pa, 3089 ("pmap_enter: unexpected pa update for %#lx", va)); 3090 if ((newpte & PG_M) == 0 && (origpte & (PG_M | PG_RW)) == 3091 (PG_M | PG_RW)) { 3092 if ((origpte & PG_MANAGED) != 0) 3093 vm_page_dirty(m); 3094 invalidate_page = true; 3095 3096 /* 3097 * Although the PTE may still have PG_RW set, TLB 3098 * invalidation may nonetheless be required because 3099 * the PTE no longer has PG_M set. 3100 */ 3101 } else if ((origpte & PG_X) != 0 || (newpte & PG_X) == 0) { 3102 /* 3103 * Removing capabilities requires invalidation on POWER 3104 */ 3105 invalidate_page = true; 3106 goto unchanged; 3107 } 3108 if ((origpte & PG_A) != 0) 3109 invalidate_page = true; 3110 } else { 3111 pte_store(pte, newpte); 3112 ptesync(); 3113 } 3114 unchanged: 3115 3116 #if VM_NRESERVLEVEL > 0 3117 /* 3118 * If both the page table page and the reservation are fully 3119 * populated, then attempt promotion. 3120 */ 3121 if ((mpte == NULL || mpte->ref_count == NPTEPG) && 3122 mmu_radix_ps_enabled(pmap) && 3123 (m->flags & PG_FICTITIOUS) == 0 && 3124 vm_reserv_level_iffullpop(m) == 0 && 3125 pmap_promote_l3e(pmap, l3e, va, &lock) == 0) 3126 invalidate_all = true; 3127 #endif 3128 if (invalidate_all) 3129 pmap_invalidate_all(pmap); 3130 else if (invalidate_page) 3131 pmap_invalidate_page(pmap, va); 3132 3133 rv = KERN_SUCCESS; 3134 out: 3135 if (lock != NULL) 3136 rw_wunlock(lock); 3137 PMAP_UNLOCK(pmap); 3138 3139 return (rv); 3140 } 3141 3142 /* 3143 * Tries to create a read- and/or execute-only 2MB page mapping. Returns true 3144 * if successful. Returns false if (1) a page table page cannot be allocated 3145 * without sleeping, (2) a mapping already exists at the specified virtual 3146 * address, or (3) a PV entry cannot be allocated without reclaiming another 3147 * PV entry. 3148 */ 3149 static bool 3150 pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 3151 struct rwlock **lockp) 3152 { 3153 pml3_entry_t newpde; 3154 3155 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3156 newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.mdpg_cache_attrs) | 3157 RPTE_LEAF | PG_V; 3158 if ((m->oflags & VPO_UNMANAGED) == 0) 3159 newpde |= PG_MANAGED; 3160 if (prot & VM_PROT_EXECUTE) 3161 newpde |= PG_X; 3162 if (prot & VM_PROT_READ) 3163 newpde |= RPTE_EAA_R; 3164 if (va >= DMAP_MIN_ADDRESS) 3165 newpde |= RPTE_EAA_P; 3166 return (pmap_enter_l3e(pmap, va, newpde, PMAP_ENTER_NOSLEEP | 3167 PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) == 3168 KERN_SUCCESS); 3169 } 3170 3171 /* 3172 * Tries to create the specified 2MB page mapping. Returns KERN_SUCCESS if 3173 * the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE 3174 * otherwise. Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and 3175 * a mapping already exists at the specified virtual address. Returns 3176 * KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NOSLEEP was specified and a page table 3177 * page allocation failed. Returns KERN_RESOURCE_SHORTAGE if 3178 * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed. 3179 * 3180 * The parameter "m" is only used when creating a managed, writeable mapping. 
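* (Specifically, when the new PDE is writeable, PGA_WRITEABLE is set
* on each of the constituent 4KB pages.)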
3181 */ 3182 static int 3183 pmap_enter_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t newpde, u_int flags, 3184 vm_page_t m, struct rwlock **lockp) 3185 { 3186 struct spglist free; 3187 pml3_entry_t oldl3e, *l3e; 3188 vm_page_t mt, pdpg; 3189 3190 KASSERT((newpde & (PG_M | PG_RW)) != PG_RW, 3191 ("pmap_enter_pde: newpde is missing PG_M")); 3192 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3193 3194 if ((pdpg = pmap_allocl3e(pmap, va, (flags & PMAP_ENTER_NOSLEEP) != 0 ? 3195 NULL : lockp)) == NULL) { 3196 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx" 3197 " in pmap %p", va, pmap); 3198 return (KERN_RESOURCE_SHORTAGE); 3199 } 3200 l3e = (pml3_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg)); 3201 l3e = &l3e[pmap_pml3e_index(va)]; 3202 oldl3e = be64toh(*l3e); 3203 if ((oldl3e & PG_V) != 0) { 3204 KASSERT(pdpg->ref_count > 1, 3205 ("pmap_enter_pde: pdpg's wire count is too low")); 3206 if ((flags & PMAP_ENTER_NOREPLACE) != 0) { 3207 pdpg->ref_count--; 3208 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx" 3209 " in pmap %p", va, pmap); 3210 return (KERN_FAILURE); 3211 } 3212 /* Break the existing mapping(s). */ 3213 SLIST_INIT(&free); 3214 if ((oldl3e & RPTE_LEAF) != 0) { 3215 /* 3216 * The reference to the PD page that was acquired by 3217 * pmap_allocl3e() ensures that it won't be freed. 3218 * However, if the PDE resulted from a promotion, then 3219 * a reserved PT page could be freed. 3220 */ 3221 (void)pmap_remove_l3e(pmap, l3e, va, &free, lockp); 3222 pmap_invalidate_l3e_page(pmap, va, oldl3e); 3223 } else { 3224 if (pmap_remove_ptes(pmap, va, va + L3_PAGE_SIZE, l3e, 3225 &free, lockp)) 3226 pmap_invalidate_all(pmap); 3227 } 3228 vm_page_free_pages_toq(&free, true); 3229 if (va >= VM_MAXUSER_ADDRESS) { 3230 mt = PHYS_TO_VM_PAGE(be64toh(*l3e) & PG_FRAME); 3231 if (pmap_insert_pt_page(pmap, mt)) { 3232 /* 3233 * XXX Currently, this can't happen because 3234 * we do not perform pmap_enter(psind == 1) 3235 * on the kernel pmap. 3236 */ 3237 panic("pmap_enter_pde: trie insert failed"); 3238 } 3239 } else 3240 KASSERT(be64toh(*l3e) == 0, ("pmap_enter_pde: non-zero pde %p", 3241 l3e)); 3242 } 3243 if ((newpde & PG_MANAGED) != 0) { 3244 /* 3245 * Abort this mapping if its PV entry could not be created. 3246 */ 3247 if (!pmap_pv_insert_l3e(pmap, va, newpde, flags, lockp)) { 3248 SLIST_INIT(&free); 3249 if (pmap_unwire_ptp(pmap, va, pdpg, &free)) { 3250 /* 3251 * Although "va" is not mapped, paging- 3252 * structure caches could nonetheless have 3253 * entries that refer to the freed page table 3254 * pages. Invalidate those entries. 3255 */ 3256 pmap_invalidate_page(pmap, va); 3257 vm_page_free_pages_toq(&free, true); 3258 } 3259 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx" 3260 " in pmap %p", va, pmap); 3261 return (KERN_RESOURCE_SHORTAGE); 3262 } 3263 if ((newpde & PG_RW) != 0) { 3264 for (mt = m; mt < &m[L3_PAGE_SIZE / PAGE_SIZE]; mt++) 3265 vm_page_aflag_set(mt, PGA_WRITEABLE); 3266 } 3267 } 3268 3269 /* 3270 * Increment counters. 3271 */ 3272 if ((newpde & PG_W) != 0) 3273 pmap->pm_stats.wired_count += L3_PAGE_SIZE / PAGE_SIZE; 3274 pmap_resident_count_inc(pmap, L3_PAGE_SIZE / PAGE_SIZE); 3275 3276 /* 3277 * Map the superpage. (This is not a promoted mapping; there will not 3278 * be any lingering 4KB page mappings in the TLB.) 
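* Consequently a plain pte_store() followed by a ptesync suffices;
* no demotion-style shootdown of stale 4KB entries is needed.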
3279 */ 3280 pte_store(l3e, newpde); 3281 ptesync(); 3282 3283 counter_u64_add(pmap_l3e_mappings, 1); 3284 CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx" 3285 " in pmap %p", va, pmap); 3286 return (KERN_SUCCESS); 3287 } 3288 3289 void 3290 mmu_radix_enter_object(pmap_t pmap, vm_offset_t start, 3291 vm_offset_t end, vm_page_t m_start, vm_prot_t prot) 3292 { 3293 3294 struct rwlock *lock; 3295 vm_offset_t va; 3296 vm_page_t m, mpte; 3297 vm_pindex_t diff, psize; 3298 bool invalidate; 3299 VM_OBJECT_ASSERT_LOCKED(m_start->object); 3300 3301 CTR6(KTR_PMAP, "%s(%p, %#x, %#x, %p, %#x)", __func__, pmap, start, 3302 end, m_start, prot); 3303 3304 invalidate = false; 3305 psize = atop(end - start); 3306 mpte = NULL; 3307 m = m_start; 3308 lock = NULL; 3309 PMAP_LOCK(pmap); 3310 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 3311 va = start + ptoa(diff); 3312 if ((va & L3_PAGE_MASK) == 0 && va + L3_PAGE_SIZE <= end && 3313 m->psind == 1 && mmu_radix_ps_enabled(pmap) && 3314 pmap_enter_2mpage(pmap, va, m, prot, &lock)) 3315 m = &m[L3_PAGE_SIZE / PAGE_SIZE - 1]; 3316 else 3317 mpte = mmu_radix_enter_quick_locked(pmap, va, m, prot, 3318 mpte, &lock, &invalidate); 3319 m = TAILQ_NEXT(m, listq); 3320 } 3321 ptesync(); 3322 if (lock != NULL) 3323 rw_wunlock(lock); 3324 if (invalidate) 3325 pmap_invalidate_all(pmap); 3326 PMAP_UNLOCK(pmap); 3327 } 3328 3329 static vm_page_t 3330 mmu_radix_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, 3331 vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp, bool *invalidate) 3332 { 3333 struct spglist free; 3334 pt_entry_t *pte; 3335 vm_paddr_t pa; 3336 3337 KASSERT(!VA_IS_CLEANMAP(va) || 3338 (m->oflags & VPO_UNMANAGED) != 0, 3339 ("mmu_radix_enter_quick_locked: managed mapping within the clean submap")); 3340 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3341 3342 /* 3343 * In the case that a page table page is not 3344 * resident, we are creating it here. 3345 */ 3346 if (va < VM_MAXUSER_ADDRESS) { 3347 vm_pindex_t ptepindex; 3348 pml3_entry_t *ptepa; 3349 3350 /* 3351 * Calculate pagetable page index 3352 */ 3353 ptepindex = pmap_l3e_pindex(va); 3354 if (mpte && (mpte->pindex == ptepindex)) { 3355 mpte->ref_count++; 3356 } else { 3357 /* 3358 * Get the page directory entry 3359 */ 3360 ptepa = pmap_pml3e(pmap, va); 3361 3362 /* 3363 * If the page table page is mapped, we just increment 3364 * the hold count, and activate it. Otherwise, we 3365 * attempt to allocate a page table page. If this 3366 * attempt fails, we don't retry. Instead, we give up. 3367 */ 3368 if (ptepa && (be64toh(*ptepa) & PG_V) != 0) { 3369 if (be64toh(*ptepa) & RPTE_LEAF) 3370 return (NULL); 3371 mpte = PHYS_TO_VM_PAGE(be64toh(*ptepa) & PG_FRAME); 3372 mpte->ref_count++; 3373 } else { 3374 /* 3375 * Pass NULL instead of the PV list lock 3376 * pointer, because we don't intend to sleep. 3377 */ 3378 mpte = _pmap_allocpte(pmap, ptepindex, NULL); 3379 if (mpte == NULL) 3380 return (mpte); 3381 } 3382 } 3383 pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte)); 3384 pte = &pte[pmap_pte_index(va)]; 3385 } else { 3386 mpte = NULL; 3387 pte = pmap_pte(pmap, va); 3388 } 3389 if (be64toh(*pte)) { 3390 if (mpte != NULL) { 3391 mpte->ref_count--; 3392 mpte = NULL; 3393 } 3394 return (mpte); 3395 } 3396 3397 /* 3398 * Enter on the PV list if part of our managed memory. 
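* pmap_try_insert_pv_entry() is used so that this path never sleeps or
* reclaims; if no pv entry can be allocated, the reference taken on
* the page table page is dropped and the mapping is abandoned.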
3399 */ 3400 if ((m->oflags & VPO_UNMANAGED) == 0 && 3401 !pmap_try_insert_pv_entry(pmap, va, m, lockp)) { 3402 if (mpte != NULL) { 3403 SLIST_INIT(&free); 3404 if (pmap_unwire_ptp(pmap, va, mpte, &free)) { 3405 /* 3406 * Although "va" is not mapped, paging- 3407 * structure caches could nonetheless have 3408 * entries that refer to the freed page table 3409 * pages. Invalidate those entries. 3410 */ 3411 *invalidate = true; 3412 vm_page_free_pages_toq(&free, true); 3413 } 3414 mpte = NULL; 3415 } 3416 return (mpte); 3417 } 3418 3419 /* 3420 * Increment counters 3421 */ 3422 pmap_resident_count_inc(pmap, 1); 3423 3424 pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.mdpg_cache_attrs); 3425 if (prot & VM_PROT_EXECUTE) 3426 pa |= PG_X; 3427 else 3428 pa |= RPTE_EAA_R; 3429 if ((m->oflags & VPO_UNMANAGED) == 0) 3430 pa |= PG_MANAGED; 3431 3432 pte_store(pte, pa); 3433 return (mpte); 3434 } 3435 3436 void 3437 mmu_radix_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, 3438 vm_prot_t prot) 3439 { 3440 struct rwlock *lock; 3441 bool invalidate; 3442 3443 lock = NULL; 3444 invalidate = false; 3445 PMAP_LOCK(pmap); 3446 mmu_radix_enter_quick_locked(pmap, va, m, prot, NULL, &lock, 3447 &invalidate); 3448 ptesync(); 3449 if (lock != NULL) 3450 rw_wunlock(lock); 3451 if (invalidate) 3452 pmap_invalidate_all(pmap); 3453 PMAP_UNLOCK(pmap); 3454 } 3455 3456 vm_paddr_t 3457 mmu_radix_extract(pmap_t pmap, vm_offset_t va) 3458 { 3459 pml3_entry_t *l3e; 3460 pt_entry_t *pte; 3461 vm_paddr_t pa; 3462 3463 l3e = pmap_pml3e(pmap, va); 3464 if (__predict_false(l3e == NULL)) 3465 return (0); 3466 if (be64toh(*l3e) & RPTE_LEAF) { 3467 pa = (be64toh(*l3e) & PG_PS_FRAME) | (va & L3_PAGE_MASK); 3468 pa |= (va & L3_PAGE_MASK); 3469 } else { 3470 /* 3471 * Beware of a concurrent promotion that changes the 3472 * PDE at this point! For example, vtopte() must not 3473 * be used to access the PTE because it would use the 3474 * new PDE. It is, however, safe to use the old PDE 3475 * because the page table page is preserved by the 3476 * promotion. 
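* (The 4KB PTE read below may therefore belong to a page table page
* that a concurrent promotion has already replaced with a leaf PDE,
* but its contents still describe the same physical page.)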
3477 */ 3478 pte = pmap_l3e_to_pte(l3e, va); 3479 if (__predict_false(pte == NULL)) 3480 return (0); 3481 pa = be64toh(*pte); 3482 pa = (pa & PG_FRAME) | (va & PAGE_MASK); 3483 pa |= (va & PAGE_MASK); 3484 } 3485 return (pa); 3486 } 3487 3488 vm_page_t 3489 mmu_radix_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 3490 { 3491 pml3_entry_t l3e, *l3ep; 3492 pt_entry_t pte; 3493 vm_page_t m; 3494 3495 m = NULL; 3496 CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, va, prot); 3497 PMAP_LOCK(pmap); 3498 l3ep = pmap_pml3e(pmap, va); 3499 if (l3ep != NULL && (l3e = be64toh(*l3ep))) { 3500 if (l3e & RPTE_LEAF) { 3501 if ((l3e & PG_RW) || (prot & VM_PROT_WRITE) == 0) 3502 m = PHYS_TO_VM_PAGE((l3e & PG_PS_FRAME) | 3503 (va & L3_PAGE_MASK)); 3504 } else { 3505 /* Native endian PTE, do not pass to pmap functions */ 3506 pte = be64toh(*pmap_l3e_to_pte(l3ep, va)); 3507 if ((pte & PG_V) && 3508 ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) 3509 m = PHYS_TO_VM_PAGE(pte & PG_FRAME); 3510 } 3511 if (m != NULL && !vm_page_wire_mapped(m)) 3512 m = NULL; 3513 } 3514 PMAP_UNLOCK(pmap); 3515 return (m); 3516 } 3517 3518 static void 3519 mmu_radix_growkernel(vm_offset_t addr) 3520 { 3521 vm_paddr_t paddr; 3522 vm_page_t nkpg; 3523 pml3_entry_t *l3e; 3524 pml2_entry_t *l2e; 3525 3526 CTR2(KTR_PMAP, "%s(%#x)", __func__, addr); 3527 if (VM_MIN_KERNEL_ADDRESS < addr && 3528 addr < (VM_MIN_KERNEL_ADDRESS + nkpt * L3_PAGE_SIZE)) 3529 return; 3530 3531 addr = roundup2(addr, L3_PAGE_SIZE); 3532 if (addr - 1 >= vm_map_max(kernel_map)) 3533 addr = vm_map_max(kernel_map); 3534 while (kernel_vm_end < addr) { 3535 l2e = pmap_pml2e(kernel_pmap, kernel_vm_end); 3536 if ((be64toh(*l2e) & PG_V) == 0) { 3537 /* We need a new PDP entry */ 3538 nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_PAGE_SIZE_SHIFT, 3539 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | 3540 VM_ALLOC_WIRED | VM_ALLOC_ZERO); 3541 if (nkpg == NULL) 3542 panic("pmap_growkernel: no memory to grow kernel"); 3543 if ((nkpg->flags & PG_ZERO) == 0) 3544 mmu_radix_zero_page(nkpg); 3545 paddr = VM_PAGE_TO_PHYS(nkpg); 3546 pde_store(l2e, paddr); 3547 continue; /* try again */ 3548 } 3549 l3e = pmap_l2e_to_l3e(l2e, kernel_vm_end); 3550 if ((be64toh(*l3e) & PG_V) != 0) { 3551 kernel_vm_end = (kernel_vm_end + L3_PAGE_SIZE) & ~L3_PAGE_MASK; 3552 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { 3553 kernel_vm_end = vm_map_max(kernel_map); 3554 break; 3555 } 3556 continue; 3557 } 3558 3559 nkpg = vm_page_alloc(NULL, pmap_l3e_pindex(kernel_vm_end), 3560 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | 3561 VM_ALLOC_ZERO); 3562 if (nkpg == NULL) 3563 panic("pmap_growkernel: no memory to grow kernel"); 3564 if ((nkpg->flags & PG_ZERO) == 0) 3565 mmu_radix_zero_page(nkpg); 3566 paddr = VM_PAGE_TO_PHYS(nkpg); 3567 pde_store(l3e, paddr); 3568 3569 kernel_vm_end = (kernel_vm_end + L3_PAGE_SIZE) & ~L3_PAGE_MASK; 3570 if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { 3571 kernel_vm_end = vm_map_max(kernel_map); 3572 break; 3573 } 3574 } 3575 ptesync(); 3576 } 3577 3578 static MALLOC_DEFINE(M_RADIX_PGD, "radix_pgd", "radix page table root directory"); 3579 static uma_zone_t zone_radix_pgd; 3580 3581 static int 3582 radix_pgd_import(void *arg __unused, void **store, int count, int domain __unused, 3583 int flags) 3584 { 3585 3586 for (int i = 0; i < count; i++) { 3587 vm_page_t m = vm_page_alloc_contig(NULL, 0, 3588 VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | 3589 VM_ALLOC_ZERO | VM_ALLOC_WAITOK, RADIX_PGD_SIZE/PAGE_SIZE, 3590 0, (vm_paddr_t)-1, RADIX_PGD_SIZE, 
L1_PAGE_SIZE, 3591 VM_MEMATTR_DEFAULT); 3592 /* XXX zero on alloc here so we don't have to later */ 3593 store[i] = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); 3594 } 3595 return (count); 3596 } 3597 3598 static void 3599 radix_pgd_release(void *arg __unused, void **store, int count) 3600 { 3601 vm_page_t m; 3602 struct spglist free; 3603 int page_count; 3604 3605 SLIST_INIT(&free); 3606 page_count = RADIX_PGD_SIZE/PAGE_SIZE; 3607 3608 for (int i = 0; i < count; i++) { 3609 /* 3610 * XXX selectively remove dmap and KVA entries so we don't 3611 * need to bzero 3612 */ 3613 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)store[i])); 3614 for (int j = page_count-1; j >= 0; j--) { 3615 vm_page_unwire_noq(&m[j]); 3616 SLIST_INSERT_HEAD(&free, &m[j], plinks.s.ss); 3617 } 3618 vm_page_free_pages_toq(&free, false); 3619 } 3620 } 3621 3622 static void 3623 mmu_radix_init() 3624 { 3625 vm_page_t mpte; 3626 vm_size_t s; 3627 int error, i, pv_npg; 3628 3629 /* XXX is this really needed for POWER? */ 3630 /* L1TF, reserve page @0 unconditionally */ 3631 vm_page_blacklist_add(0, bootverbose); 3632 3633 zone_radix_pgd = uma_zcache_create("radix_pgd_cache", 3634 RADIX_PGD_SIZE, NULL, NULL, 3635 #ifdef INVARIANTS 3636 trash_init, trash_fini, 3637 #else 3638 NULL, NULL, 3639 #endif 3640 radix_pgd_import, radix_pgd_release, 3641 NULL, UMA_ZONE_NOBUCKET); 3642 3643 /* 3644 * Initialize the vm page array entries for the kernel pmap's 3645 * page table pages. 3646 */ 3647 PMAP_LOCK(kernel_pmap); 3648 for (i = 0; i < nkpt; i++) { 3649 mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT)); 3650 KASSERT(mpte >= vm_page_array && 3651 mpte < &vm_page_array[vm_page_array_size], 3652 ("pmap_init: page table page is out of range size: %lu", 3653 vm_page_array_size)); 3654 mpte->pindex = pmap_l3e_pindex(VM_MIN_KERNEL_ADDRESS) + i; 3655 mpte->phys_addr = KPTphys + (i << PAGE_SHIFT); 3656 MPASS(PHYS_TO_VM_PAGE(mpte->phys_addr) == mpte); 3657 //pmap_insert_pt_page(kernel_pmap, mpte); 3658 mpte->ref_count = 1; 3659 } 3660 PMAP_UNLOCK(kernel_pmap); 3661 vm_wire_add(nkpt); 3662 3663 CTR1(KTR_PMAP, "%s()", __func__); 3664 TAILQ_INIT(&pv_dummy.pv_list); 3665 3666 /* 3667 * Are large page mappings enabled? 3668 */ 3669 TUNABLE_INT_FETCH("vm.pmap.superpages_enabled", &superpages_enabled); 3670 if (superpages_enabled) { 3671 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0, 3672 ("pmap_init: can't assign to pagesizes[1]")); 3673 pagesizes[1] = L3_PAGE_SIZE; 3674 } 3675 3676 /* 3677 * Initialize the pv chunk list mutex. 3678 */ 3679 mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF); 3680 3681 /* 3682 * Initialize the pool of pv list locks. 3683 */ 3684 for (i = 0; i < NPV_LIST_LOCKS; i++) 3685 rw_init(&pv_list_locks[i], "pmap pv list"); 3686 3687 /* 3688 * Calculate the size of the pv head table for superpages. 3689 */ 3690 pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, L3_PAGE_SIZE); 3691 3692 /* 3693 * Allocate memory for the pv head table for superpages. 
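* One md_page is needed per potential 2MB superpage; pv_npg was
* computed above as the number of L3_PAGE_SIZE chunks up to the end of
* the last physical segment.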
3694 */ 3695 s = (vm_size_t)(pv_npg * sizeof(struct md_page)); 3696 s = round_page(s); 3697 pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO); 3698 for (i = 0; i < pv_npg; i++) 3699 TAILQ_INIT(&pv_table[i].pv_list); 3700 TAILQ_INIT(&pv_dummy.pv_list); 3701 3702 pmap_initialized = 1; 3703 mtx_init(&qframe_mtx, "qfrmlk", NULL, MTX_SPIN); 3704 error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK, 3705 (vmem_addr_t *)&qframe); 3706 3707 if (error != 0) 3708 panic("qframe allocation failed"); 3709 asid_arena = vmem_create("ASID", isa3_base_pid + 1, (1<<isa3_pid_bits), 3710 1, 1, M_WAITOK); 3711 } 3712 3713 static boolean_t 3714 pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified) 3715 { 3716 struct rwlock *lock; 3717 pv_entry_t pv; 3718 struct md_page *pvh; 3719 pt_entry_t *pte, mask; 3720 pmap_t pmap; 3721 int md_gen, pvh_gen; 3722 boolean_t rv; 3723 3724 rv = FALSE; 3725 lock = VM_PAGE_TO_PV_LIST_LOCK(m); 3726 rw_rlock(lock); 3727 restart: 3728 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 3729 pmap = PV_PMAP(pv); 3730 if (!PMAP_TRYLOCK(pmap)) { 3731 md_gen = m->md.pv_gen; 3732 rw_runlock(lock); 3733 PMAP_LOCK(pmap); 3734 rw_rlock(lock); 3735 if (md_gen != m->md.pv_gen) { 3736 PMAP_UNLOCK(pmap); 3737 goto restart; 3738 } 3739 } 3740 pte = pmap_pte(pmap, pv->pv_va); 3741 mask = 0; 3742 if (modified) 3743 mask |= PG_RW | PG_M; 3744 if (accessed) 3745 mask |= PG_V | PG_A; 3746 rv = (be64toh(*pte) & mask) == mask; 3747 PMAP_UNLOCK(pmap); 3748 if (rv) 3749 goto out; 3750 } 3751 if ((m->flags & PG_FICTITIOUS) == 0) { 3752 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 3753 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) { 3754 pmap = PV_PMAP(pv); 3755 if (!PMAP_TRYLOCK(pmap)) { 3756 md_gen = m->md.pv_gen; 3757 pvh_gen = pvh->pv_gen; 3758 rw_runlock(lock); 3759 PMAP_LOCK(pmap); 3760 rw_rlock(lock); 3761 if (md_gen != m->md.pv_gen || 3762 pvh_gen != pvh->pv_gen) { 3763 PMAP_UNLOCK(pmap); 3764 goto restart; 3765 } 3766 } 3767 pte = pmap_pml3e(pmap, pv->pv_va); 3768 mask = 0; 3769 if (modified) 3770 mask |= PG_RW | PG_M; 3771 if (accessed) 3772 mask |= PG_V | PG_A; 3773 rv = (be64toh(*pte) & mask) == mask; 3774 PMAP_UNLOCK(pmap); 3775 if (rv) 3776 goto out; 3777 } 3778 } 3779 out: 3780 rw_runlock(lock); 3781 return (rv); 3782 } 3783 3784 /* 3785 * pmap_is_modified: 3786 * 3787 * Return whether or not the specified physical page was modified 3788 * in any physical maps. 3789 */ 3790 boolean_t 3791 mmu_radix_is_modified(vm_page_t m) 3792 { 3793 3794 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3795 ("pmap_is_modified: page %p is not managed", m)); 3796 3797 CTR2(KTR_PMAP, "%s(%p)", __func__, m); 3798 /* 3799 * If the page is not busied then this check is racy. 
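 * A page can only be dirtied through a writeable mapping, so pages that were never mapped writeable are reported clean below without scanning their PV lists.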
3800 */ 3801 if (!pmap_page_is_write_mapped(m)) 3802 return (FALSE); 3803 return (pmap_page_test_mappings(m, FALSE, TRUE)); 3804 } 3805 3806 boolean_t 3807 mmu_radix_is_prefaultable(pmap_t pmap, vm_offset_t addr) 3808 { 3809 pml3_entry_t *l3e; 3810 pt_entry_t *pte; 3811 boolean_t rv; 3812 3813 CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr); 3814 rv = FALSE; 3815 PMAP_LOCK(pmap); 3816 l3e = pmap_pml3e(pmap, addr); 3817 if (l3e != NULL && (be64toh(*l3e) & (RPTE_LEAF | PG_V)) == PG_V) { 3818 pte = pmap_l3e_to_pte(l3e, addr); 3819 rv = (be64toh(*pte) & PG_V) == 0; 3820 } 3821 PMAP_UNLOCK(pmap); 3822 return (rv); 3823 } 3824 3825 boolean_t 3826 mmu_radix_is_referenced(vm_page_t m) 3827 { 3828 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3829 ("pmap_is_referenced: page %p is not managed", m)); 3830 CTR2(KTR_PMAP, "%s(%p)", __func__, m); 3831 return (pmap_page_test_mappings(m, TRUE, FALSE)); 3832 } 3833 3834 /* 3835 * pmap_ts_referenced: 3836 * 3837 * Return a count of reference bits for a page, clearing those bits. 3838 * It is not necessary for every reference bit to be cleared, but it 3839 * is necessary that 0 only be returned when there are truly no 3840 * reference bits set. 3841 * 3842 * As an optimization, update the page's dirty field if a modified bit is 3843 * found while counting reference bits. This opportunistic update can be 3844 * performed at low cost and can eliminate the need for some future calls 3845 * to pmap_is_modified(). However, since this function stops after 3846 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some 3847 * dirty pages. Those dirty pages will only be detected by a future call 3848 * to pmap_is_modified(). 3849 * 3850 * A DI block is not needed within this function, because 3851 * invalidations are performed before the PV list lock is 3852 * released. 3853 */ 3854 boolean_t 3855 mmu_radix_ts_referenced(vm_page_t m) 3856 { 3857 struct md_page *pvh; 3858 pv_entry_t pv, pvf; 3859 pmap_t pmap; 3860 struct rwlock *lock; 3861 pml3_entry_t oldl3e, *l3e; 3862 pt_entry_t *pte; 3863 vm_paddr_t pa; 3864 int cleared, md_gen, not_cleared, pvh_gen; 3865 struct spglist free; 3866 3867 CTR2(KTR_PMAP, "%s(%p)", __func__, m); 3868 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3869 ("pmap_ts_referenced: page %p is not managed", m)); 3870 SLIST_INIT(&free); 3871 cleared = 0; 3872 pa = VM_PAGE_TO_PHYS(m); 3873 lock = PHYS_TO_PV_LIST_LOCK(pa); 3874 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa); 3875 rw_wlock(lock); 3876 retry: 3877 not_cleared = 0; 3878 if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL) 3879 goto small_mappings; 3880 pv = pvf; 3881 do { 3882 if (pvf == NULL) 3883 pvf = pv; 3884 pmap = PV_PMAP(pv); 3885 if (!PMAP_TRYLOCK(pmap)) { 3886 pvh_gen = pvh->pv_gen; 3887 rw_wunlock(lock); 3888 PMAP_LOCK(pmap); 3889 rw_wlock(lock); 3890 if (pvh_gen != pvh->pv_gen) { 3891 PMAP_UNLOCK(pmap); 3892 goto retry; 3893 } 3894 } 3895 l3e = pmap_pml3e(pmap, pv->pv_va); 3896 oldl3e = be64toh(*l3e); 3897 if ((oldl3e & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 3898 /* 3899 * Although "oldpde" is mapping a 2MB page, because 3900 * this function is called at a 4KB page granularity, 3901 * we only update the 4KB page under test. 3902 */ 3903 vm_page_dirty(m); 3904 } 3905 if ((oldl3e & PG_A) != 0) { 3906 /* 3907 * Since this reference bit is shared by 512 4KB 3908 * pages, it should not be cleared every time it is 3909 * tested. 
Apply a simple "hash" function on the 3910 * physical page number, the virtual superpage number, 3911 * and the pmap address to select one 4KB page out of 3912 * the 512 on which testing the reference bit will 3913 * result in clearing that reference bit. This 3914 * function is designed to avoid the selection of the 3915 * same 4KB page for every 2MB page mapping. 3916 * 3917 * On demotion, a mapping that hasn't been referenced 3918 * is simply destroyed. To avoid the possibility of a 3919 * subsequent page fault on a demoted wired mapping, 3920 * always leave its reference bit set. Moreover, 3921 * since the superpage is wired, the current state of 3922 * its reference bit won't affect page replacement. 3923 */ 3924 if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> L3_PAGE_SIZE_SHIFT) ^ 3925 (uintptr_t)pmap) & (NPTEPG - 1)) == 0 && 3926 (oldl3e & PG_W) == 0) { 3927 atomic_clear_long(l3e, htobe64(PG_A)); 3928 pmap_invalidate_page(pmap, pv->pv_va); 3929 cleared++; 3930 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m), 3931 ("inconsistent pv lock %p %p for page %p", 3932 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m)); 3933 } else 3934 not_cleared++; 3935 } 3936 PMAP_UNLOCK(pmap); 3937 /* Rotate the PV list if it has more than one entry. */ 3938 if (pv != NULL && TAILQ_NEXT(pv, pv_link) != NULL) { 3939 TAILQ_REMOVE(&pvh->pv_list, pv, pv_link); 3940 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_link); 3941 pvh->pv_gen++; 3942 } 3943 if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX) 3944 goto out; 3945 } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf); 3946 small_mappings: 3947 if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL) 3948 goto out; 3949 pv = pvf; 3950 do { 3951 if (pvf == NULL) 3952 pvf = pv; 3953 pmap = PV_PMAP(pv); 3954 if (!PMAP_TRYLOCK(pmap)) { 3955 pvh_gen = pvh->pv_gen; 3956 md_gen = m->md.pv_gen; 3957 rw_wunlock(lock); 3958 PMAP_LOCK(pmap); 3959 rw_wlock(lock); 3960 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) { 3961 PMAP_UNLOCK(pmap); 3962 goto retry; 3963 } 3964 } 3965 l3e = pmap_pml3e(pmap, pv->pv_va); 3966 KASSERT((be64toh(*l3e) & RPTE_LEAF) == 0, 3967 ("pmap_ts_referenced: found a 2mpage in page %p's pv list", 3968 m)); 3969 pte = pmap_l3e_to_pte(l3e, pv->pv_va); 3970 if ((be64toh(*pte) & (PG_M | PG_RW)) == (PG_M | PG_RW)) 3971 vm_page_dirty(m); 3972 if ((be64toh(*pte) & PG_A) != 0) { 3973 atomic_clear_long(pte, htobe64(PG_A)); 3974 pmap_invalidate_page(pmap, pv->pv_va); 3975 cleared++; 3976 } 3977 PMAP_UNLOCK(pmap); 3978 /* Rotate the PV list if it has more than one entry. 
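 * Moving the entry just examined to the tail spreads the clearing of reference bits over all mappings of the page on successive calls.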
*/ 3979 if (pv != NULL && TAILQ_NEXT(pv, pv_link) != NULL) { 3980 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link); 3981 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link); 3982 m->md.pv_gen++; 3983 } 3984 } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared + 3985 not_cleared < PMAP_TS_REFERENCED_MAX); 3986 out: 3987 rw_wunlock(lock); 3988 vm_page_free_pages_toq(&free, true); 3989 return (cleared + not_cleared); 3990 } 3991 3992 static vm_offset_t 3993 mmu_radix_map(vm_offset_t *virt __unused, vm_paddr_t start, 3994 vm_paddr_t end, int prot __unused) 3995 { 3996 3997 CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, virt, start, end, 3998 prot); 3999 return (PHYS_TO_DMAP(start)); 4000 } 4001 4002 void 4003 mmu_radix_object_init_pt(pmap_t pmap, vm_offset_t addr, 4004 vm_object_t object, vm_pindex_t pindex, vm_size_t size) 4005 { 4006 pml3_entry_t *l3e; 4007 vm_paddr_t pa, ptepa; 4008 vm_page_t p, pdpg; 4009 vm_memattr_t ma; 4010 4011 CTR6(KTR_PMAP, "%s(%p, %#x, %p, %u, %#x)", __func__, pmap, addr, 4012 object, pindex, size); 4013 VM_OBJECT_ASSERT_WLOCKED(object); 4014 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 4015 ("pmap_object_init_pt: non-device object")); 4016 /* NB: size can be logically ored with addr here */ 4017 if ((addr & L3_PAGE_MASK) == 0 && (size & L3_PAGE_MASK) == 0) { 4018 if (!mmu_radix_ps_enabled(pmap)) 4019 return; 4020 if (!vm_object_populate(object, pindex, pindex + atop(size))) 4021 return; 4022 p = vm_page_lookup(object, pindex); 4023 KASSERT(p->valid == VM_PAGE_BITS_ALL, 4024 ("pmap_object_init_pt: invalid page %p", p)); 4025 ma = p->md.mdpg_cache_attrs; 4026 4027 /* 4028 * Abort the mapping if the first page is not physically 4029 * aligned to a 2MB page boundary. 4030 */ 4031 ptepa = VM_PAGE_TO_PHYS(p); 4032 if (ptepa & L3_PAGE_MASK) 4033 return; 4034 4035 /* 4036 * Skip the first page. Abort the mapping if the rest of 4037 * the pages are not physically contiguous or have differing 4038 * memory attributes. 4039 */ 4040 p = TAILQ_NEXT(p, listq); 4041 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size; 4042 pa += PAGE_SIZE) { 4043 KASSERT(p->valid == VM_PAGE_BITS_ALL, 4044 ("pmap_object_init_pt: invalid page %p", p)); 4045 if (pa != VM_PAGE_TO_PHYS(p) || 4046 ma != p->md.mdpg_cache_attrs) 4047 return; 4048 p = TAILQ_NEXT(p, listq); 4049 } 4050 4051 PMAP_LOCK(pmap); 4052 for (pa = ptepa | pmap_cache_bits(ma); 4053 pa < ptepa + size; pa += L3_PAGE_SIZE) { 4054 pdpg = pmap_allocl3e(pmap, addr, NULL); 4055 if (pdpg == NULL) { 4056 /* 4057 * The creation of mappings below is only an 4058 * optimization. If a page directory page 4059 * cannot be allocated without blocking, 4060 * continue on to the next mapping rather than 4061 * blocking. 4062 */ 4063 addr += L3_PAGE_SIZE; 4064 continue; 4065 } 4066 l3e = (pml3_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg)); 4067 l3e = &l3e[pmap_pml3e_index(addr)]; 4068 if ((be64toh(*l3e) & PG_V) == 0) { 4069 pa |= PG_M | PG_A | PG_RW; 4070 pte_store(l3e, pa); 4071 pmap_resident_count_inc(pmap, L3_PAGE_SIZE / PAGE_SIZE); 4072 counter_u64_add(pmap_l3e_mappings, 1); 4073 } else { 4074 /* Continue on if the PDE is already valid. 
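 * Drop the reference taken by pmap_allocl3e() above, since no new mapping was created.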
*/ 4075 pdpg->ref_count--; 4076 KASSERT(pdpg->ref_count > 0, 4077 ("pmap_object_init_pt: missing reference " 4078 "to page directory page, va: 0x%lx", addr)); 4079 } 4080 addr += L3_PAGE_SIZE; 4081 } 4082 ptesync(); 4083 PMAP_UNLOCK(pmap); 4084 } 4085 } 4086 4087 boolean_t 4088 mmu_radix_page_exists_quick(pmap_t pmap, vm_page_t m) 4089 { 4090 struct md_page *pvh; 4091 struct rwlock *lock; 4092 pv_entry_t pv; 4093 int loops = 0; 4094 boolean_t rv; 4095 4096 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4097 ("pmap_page_exists_quick: page %p is not managed", m)); 4098 CTR3(KTR_PMAP, "%s(%p, %p)", __func__, pmap, m); 4099 rv = FALSE; 4100 lock = VM_PAGE_TO_PV_LIST_LOCK(m); 4101 rw_rlock(lock); 4102 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 4103 if (PV_PMAP(pv) == pmap) { 4104 rv = TRUE; 4105 break; 4106 } 4107 loops++; 4108 if (loops >= 16) 4109 break; 4110 } 4111 if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) { 4112 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 4113 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) { 4114 if (PV_PMAP(pv) == pmap) { 4115 rv = TRUE; 4116 break; 4117 } 4118 loops++; 4119 if (loops >= 16) 4120 break; 4121 } 4122 } 4123 rw_runlock(lock); 4124 return (rv); 4125 } 4126 4127 void 4128 mmu_radix_page_init(vm_page_t m) 4129 { 4130 4131 CTR2(KTR_PMAP, "%s(%p)", __func__, m); 4132 TAILQ_INIT(&m->md.pv_list); 4133 m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT; 4134 } 4135 4136 int 4137 mmu_radix_page_wired_mappings(vm_page_t m) 4138 { 4139 struct rwlock *lock; 4140 struct md_page *pvh; 4141 pmap_t pmap; 4142 pt_entry_t *pte; 4143 pv_entry_t pv; 4144 int count, md_gen, pvh_gen; 4145 4146 if ((m->oflags & VPO_UNMANAGED) != 0) 4147 return (0); 4148 CTR2(KTR_PMAP, "%s(%p)", __func__, m); 4149 lock = VM_PAGE_TO_PV_LIST_LOCK(m); 4150 rw_rlock(lock); 4151 restart: 4152 count = 0; 4153 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 4154 pmap = PV_PMAP(pv); 4155 if (!PMAP_TRYLOCK(pmap)) { 4156 md_gen = m->md.pv_gen; 4157 rw_runlock(lock); 4158 PMAP_LOCK(pmap); 4159 rw_rlock(lock); 4160 if (md_gen != m->md.pv_gen) { 4161 PMAP_UNLOCK(pmap); 4162 goto restart; 4163 } 4164 } 4165 pte = pmap_pte(pmap, pv->pv_va); 4166 if ((be64toh(*pte) & PG_W) != 0) 4167 count++; 4168 PMAP_UNLOCK(pmap); 4169 } 4170 if ((m->flags & PG_FICTITIOUS) == 0) { 4171 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 4172 TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) { 4173 pmap = PV_PMAP(pv); 4174 if (!PMAP_TRYLOCK(pmap)) { 4175 md_gen = m->md.pv_gen; 4176 pvh_gen = pvh->pv_gen; 4177 rw_runlock(lock); 4178 PMAP_LOCK(pmap); 4179 rw_rlock(lock); 4180 if (md_gen != m->md.pv_gen || 4181 pvh_gen != pvh->pv_gen) { 4182 PMAP_UNLOCK(pmap); 4183 goto restart; 4184 } 4185 } 4186 pte = pmap_pml3e(pmap, pv->pv_va); 4187 if ((be64toh(*pte) & PG_W) != 0) 4188 count++; 4189 PMAP_UNLOCK(pmap); 4190 } 4191 } 4192 rw_runlock(lock); 4193 return (count); 4194 } 4195 4196 static void 4197 mmu_radix_update_proctab(int pid, pml1_entry_t l1pa) 4198 { 4199 isa3_proctab[pid].proctab0 = htobe64(RTS_SIZE | l1pa | RADIX_PGD_INDEX_SHIFT); 4200 } 4201 4202 int 4203 mmu_radix_pinit(pmap_t pmap) 4204 { 4205 vmem_addr_t pid; 4206 vm_paddr_t l1pa; 4207 4208 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap); 4209 4210 /* 4211 * allocate the page directory page 4212 */ 4213 pmap->pm_pml1 = uma_zalloc(zone_radix_pgd, M_WAITOK); 4214 4215 for (int j = 0; j < RADIX_PGD_SIZE_SHIFT; j++) 4216 pagezero((vm_offset_t)pmap->pm_pml1 + j * PAGE_SIZE); 4217 pmap->pm_radix.rt_root = 0; 4218 TAILQ_INIT(&pmap->pm_pvchunk); 4219 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 4220 pmap->pm_flags = 
PMAP_PDE_SUPERPAGE; 4221 vmem_alloc(asid_arena, 1, M_FIRSTFIT|M_WAITOK, &pid); 4222 4223 pmap->pm_pid = pid; 4224 l1pa = DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml1); 4225 mmu_radix_update_proctab(pid, l1pa); 4226 __asm __volatile("ptesync;isync" : : : "memory"); 4227 4228 return (1); 4229 } 4230 4231 /* 4232 * This routine is called if the desired page table page does not exist. 4233 * 4234 * If page table page allocation fails, this routine may sleep before 4235 * returning NULL. It sleeps only if a lock pointer was given. 4236 * 4237 * Note: If a page allocation fails at page table level two or three, 4238 * one or two pages may be held during the wait, only to be released 4239 * afterwards. This conservative approach is easily argued to avoid 4240 * race conditions. 4241 */ 4242 static vm_page_t 4243 _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp) 4244 { 4245 vm_page_t m, pdppg, pdpg; 4246 4247 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4248 4249 /* 4250 * Allocate a page table page. 4251 */ 4252 if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ | 4253 VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { 4254 if (lockp != NULL) { 4255 RELEASE_PV_LIST_LOCK(lockp); 4256 PMAP_UNLOCK(pmap); 4257 vm_wait(NULL); 4258 PMAP_LOCK(pmap); 4259 } 4260 /* 4261 * Indicate the need to retry. While waiting, the page table 4262 * page may have been allocated. 4263 */ 4264 return (NULL); 4265 } 4266 if ((m->flags & PG_ZERO) == 0) 4267 mmu_radix_zero_page(m); 4268 4269 /* 4270 * Map the pagetable page into the process address space, if 4271 * it isn't already there. 4272 */ 4273 4274 if (ptepindex >= (NUPDE + NUPDPE)) { 4275 pml1_entry_t *l1e; 4276 vm_pindex_t pml1index; 4277 4278 /* Wire up a new PDPE page */ 4279 pml1index = ptepindex - (NUPDE + NUPDPE); 4280 l1e = &pmap->pm_pml1[pml1index]; 4281 KASSERT((be64toh(*l1e) & PG_V) == 0, 4282 ("%s: L1 entry %#lx is valid", __func__, *l1e)); 4283 pde_store(l1e, VM_PAGE_TO_PHYS(m)); 4284 } else if (ptepindex >= NUPDE) { 4285 vm_pindex_t pml1index; 4286 vm_pindex_t pdpindex; 4287 pml1_entry_t *l1e; 4288 pml2_entry_t *l2e; 4289 4290 /* Wire up a new l2e page */ 4291 pdpindex = ptepindex - NUPDE; 4292 pml1index = pdpindex >> RPTE_SHIFT; 4293 4294 l1e = &pmap->pm_pml1[pml1index]; 4295 if ((be64toh(*l1e) & PG_V) == 0) { 4296 /* Have to allocate a new pdp, recurse */ 4297 if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml1index, 4298 lockp) == NULL) { 4299 vm_page_unwire_noq(m); 4300 vm_page_free_zero(m); 4301 return (NULL); 4302 } 4303 } else { 4304 /* Add reference to l2e page */ 4305 pdppg = PHYS_TO_VM_PAGE(be64toh(*l1e) & PG_FRAME); 4306 pdppg->ref_count++; 4307 } 4308 l2e = (pml2_entry_t *)PHYS_TO_DMAP(be64toh(*l1e) & PG_FRAME); 4309 4310 /* Now find the pdp page */ 4311 l2e = &l2e[pdpindex & RPTE_MASK]; 4312 KASSERT((be64toh(*l2e) & PG_V) == 0, 4313 ("%s: L2 entry %#lx is valid", __func__, *l2e)); 4314 pde_store(l2e, VM_PAGE_TO_PHYS(m)); 4315 } else { 4316 vm_pindex_t pml1index; 4317 vm_pindex_t pdpindex; 4318 pml1_entry_t *l1e; 4319 pml2_entry_t *l2e; 4320 pml3_entry_t *l3e; 4321 4322 /* Wire up a new PTE page */ 4323 pdpindex = ptepindex >> RPTE_SHIFT; 4324 pml1index = pdpindex >> RPTE_SHIFT; 4325 4326 /* First, find the pdp and check that its valid. 
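 * If the L1 or L2 entry is not yet valid, recurse to allocate the missing intermediate page table page first.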
*/ 4327 l1e = &pmap->pm_pml1[pml1index]; 4328 if ((be64toh(*l1e) & PG_V) == 0) { 4329 /* Have to allocate a new pd, recurse */ 4330 if (_pmap_allocpte(pmap, NUPDE + pdpindex, 4331 lockp) == NULL) { 4332 vm_page_unwire_noq(m); 4333 vm_page_free_zero(m); 4334 return (NULL); 4335 } 4336 l2e = (pml2_entry_t *)PHYS_TO_DMAP(be64toh(*l1e) & PG_FRAME); 4337 l2e = &l2e[pdpindex & RPTE_MASK]; 4338 } else { 4339 l2e = (pml2_entry_t *)PHYS_TO_DMAP(be64toh(*l1e) & PG_FRAME); 4340 l2e = &l2e[pdpindex & RPTE_MASK]; 4341 if ((be64toh(*l2e) & PG_V) == 0) { 4342 /* Have to allocate a new pd, recurse */ 4343 if (_pmap_allocpte(pmap, NUPDE + pdpindex, 4344 lockp) == NULL) { 4345 vm_page_unwire_noq(m); 4346 vm_page_free_zero(m); 4347 return (NULL); 4348 } 4349 } else { 4350 /* Add reference to the pd page */ 4351 pdpg = PHYS_TO_VM_PAGE(be64toh(*l2e) & PG_FRAME); 4352 pdpg->ref_count++; 4353 } 4354 } 4355 l3e = (pml3_entry_t *)PHYS_TO_DMAP(be64toh(*l2e) & PG_FRAME); 4356 4357 /* Now we know where the page directory page is */ 4358 l3e = &l3e[ptepindex & RPTE_MASK]; 4359 KASSERT((be64toh(*l3e) & PG_V) == 0, 4360 ("%s: L3 entry %#lx is valid", __func__, *l3e)); 4361 pde_store(l3e, VM_PAGE_TO_PHYS(m)); 4362 } 4363 4364 pmap_resident_count_inc(pmap, 1); 4365 return (m); 4366 } 4367 static vm_page_t 4368 pmap_allocl3e(pmap_t pmap, vm_offset_t va, struct rwlock **lockp) 4369 { 4370 vm_pindex_t pdpindex, ptepindex; 4371 pml2_entry_t *pdpe; 4372 vm_page_t pdpg; 4373 4374 retry: 4375 pdpe = pmap_pml2e(pmap, va); 4376 if (pdpe != NULL && (be64toh(*pdpe) & PG_V) != 0) { 4377 /* Add a reference to the pd page. */ 4378 pdpg = PHYS_TO_VM_PAGE(be64toh(*pdpe) & PG_FRAME); 4379 pdpg->ref_count++; 4380 } else { 4381 /* Allocate a pd page. */ 4382 ptepindex = pmap_l3e_pindex(va); 4383 pdpindex = ptepindex >> RPTE_SHIFT; 4384 pdpg = _pmap_allocpte(pmap, NUPDE + pdpindex, lockp); 4385 if (pdpg == NULL && lockp != NULL) 4386 goto retry; 4387 } 4388 return (pdpg); 4389 } 4390 4391 static vm_page_t 4392 pmap_allocpte(pmap_t pmap, vm_offset_t va, struct rwlock **lockp) 4393 { 4394 vm_pindex_t ptepindex; 4395 pml3_entry_t *pd; 4396 vm_page_t m; 4397 4398 /* 4399 * Calculate pagetable page index 4400 */ 4401 ptepindex = pmap_l3e_pindex(va); 4402 retry: 4403 /* 4404 * Get the page directory entry 4405 */ 4406 pd = pmap_pml3e(pmap, va); 4407 4408 /* 4409 * This supports switching from a 2MB page to a 4410 * normal 4K page. 4411 */ 4412 if (pd != NULL && (be64toh(*pd) & (RPTE_LEAF | PG_V)) == (RPTE_LEAF | PG_V)) { 4413 if (!pmap_demote_l3e_locked(pmap, pd, va, lockp)) { 4414 /* 4415 * Invalidation of the 2MB page mapping may have caused 4416 * the deallocation of the underlying PD page. 4417 */ 4418 pd = NULL; 4419 } 4420 } 4421 4422 /* 4423 * If the page table page is mapped, we just increment the 4424 * hold count, and activate it. 4425 */ 4426 if (pd != NULL && (be64toh(*pd) & PG_V) != 0) { 4427 m = PHYS_TO_VM_PAGE(be64toh(*pd) & PG_FRAME); 4428 m->ref_count++; 4429 } else { 4430 /* 4431 * Here if the pte page isn't mapped, or if it has been 4432 * deallocated. 
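 * Allocate a new page table page; a NULL return with a lock pointer supplied means the allocation slept, so retry the lookup.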
4433 */ 4434 m = _pmap_allocpte(pmap, ptepindex, lockp); 4435 if (m == NULL && lockp != NULL) 4436 goto retry; 4437 } 4438 return (m); 4439 } 4440 4441 static void 4442 mmu_radix_pinit0(pmap_t pmap) 4443 { 4444 4445 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap); 4446 PMAP_LOCK_INIT(pmap); 4447 pmap->pm_pml1 = kernel_pmap->pm_pml1; 4448 pmap->pm_pid = kernel_pmap->pm_pid; 4449 4450 pmap->pm_radix.rt_root = 0; 4451 TAILQ_INIT(&pmap->pm_pvchunk); 4452 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 4453 kernel_pmap->pm_flags = 4454 pmap->pm_flags = PMAP_PDE_SUPERPAGE; 4455 } 4456 /* 4457 * pmap_protect_l3e: do the things to protect a 2mpage in a process 4458 */ 4459 static boolean_t 4460 pmap_protect_l3e(pmap_t pmap, pt_entry_t *l3e, vm_offset_t sva, vm_prot_t prot) 4461 { 4462 pt_entry_t newpde, oldpde; 4463 vm_offset_t eva, va; 4464 vm_page_t m; 4465 boolean_t anychanged; 4466 4467 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4468 KASSERT((sva & L3_PAGE_MASK) == 0, 4469 ("pmap_protect_l3e: sva is not 2mpage aligned")); 4470 anychanged = FALSE; 4471 retry: 4472 oldpde = newpde = be64toh(*l3e); 4473 if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) == 4474 (PG_MANAGED | PG_M | PG_RW)) { 4475 eva = sva + L3_PAGE_SIZE; 4476 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME); 4477 va < eva; va += PAGE_SIZE, m++) 4478 vm_page_dirty(m); 4479 } 4480 if ((prot & VM_PROT_WRITE) == 0) { 4481 newpde &= ~(PG_RW | PG_M); 4482 newpde |= RPTE_EAA_R; 4483 } 4484 if (prot & VM_PROT_EXECUTE) 4485 newpde |= PG_X; 4486 if (newpde != oldpde) { 4487 /* 4488 * As an optimization to future operations on this PDE, clear 4489 * PG_PROMOTED. The impending invalidation will remove any 4490 * lingering 4KB page mappings from the TLB. 4491 */ 4492 if (!atomic_cmpset_long(l3e, htobe64(oldpde), htobe64(newpde & ~PG_PROMOTED))) 4493 goto retry; 4494 anychanged = TRUE; 4495 } 4496 return (anychanged); 4497 } 4498 4499 void 4500 mmu_radix_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 4501 vm_prot_t prot) 4502 { 4503 vm_offset_t va_next; 4504 pml1_entry_t *l1e; 4505 pml2_entry_t *l2e; 4506 pml3_entry_t ptpaddr, *l3e; 4507 pt_entry_t *pte; 4508 boolean_t anychanged; 4509 4510 CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, pmap, sva, eva, 4511 prot); 4512 4513 KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot)); 4514 if (prot == VM_PROT_NONE) { 4515 mmu_radix_remove(pmap, sva, eva); 4516 return; 4517 } 4518 4519 if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) == 4520 (VM_PROT_WRITE|VM_PROT_EXECUTE)) 4521 return; 4522 4523 #ifdef INVARIANTS 4524 if (VERBOSE_PROTECT || pmap_logging) 4525 printf("pmap_protect(%p, %#lx, %#lx, %x) - asid: %lu\n", 4526 pmap, sva, eva, prot, pmap->pm_pid); 4527 #endif 4528 anychanged = FALSE; 4529 4530 PMAP_LOCK(pmap); 4531 for (; sva < eva; sva = va_next) { 4532 l1e = pmap_pml1e(pmap, sva); 4533 if ((be64toh(*l1e) & PG_V) == 0) { 4534 va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK; 4535 if (va_next < sva) 4536 va_next = eva; 4537 continue; 4538 } 4539 4540 l2e = pmap_l1e_to_l2e(l1e, sva); 4541 if ((be64toh(*l2e) & PG_V) == 0) { 4542 va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK; 4543 if (va_next < sva) 4544 va_next = eva; 4545 continue; 4546 } 4547 4548 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK; 4549 if (va_next < sva) 4550 va_next = eva; 4551 4552 l3e = pmap_l2e_to_l3e(l2e, sva); 4553 ptpaddr = be64toh(*l3e); 4554 4555 /* 4556 * Weed out invalid mappings. 4557 */ 4558 if (ptpaddr == 0) 4559 continue; 4560 4561 /* 4562 * Check for large page. 
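 * A leaf entry at this level maps a full 2MB superpage.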
4563 */ 4564 if ((ptpaddr & RPTE_LEAF) != 0) { 4565 /* 4566 * Are we protecting the entire large page? If not, 4567 * demote the mapping and fall through. 4568 */ 4569 if (sva + L3_PAGE_SIZE == va_next && eva >= va_next) { 4570 if (pmap_protect_l3e(pmap, l3e, sva, prot)) 4571 anychanged = TRUE; 4572 continue; 4573 } else if (!pmap_demote_l3e(pmap, l3e, sva)) { 4574 /* 4575 * The large page mapping was destroyed. 4576 */ 4577 continue; 4578 } 4579 } 4580 4581 if (va_next > eva) 4582 va_next = eva; 4583 4584 for (pte = pmap_l3e_to_pte(l3e, sva); sva != va_next; pte++, 4585 sva += PAGE_SIZE) { 4586 pt_entry_t obits, pbits; 4587 vm_page_t m; 4588 4589 retry: 4590 MPASS(pte == pmap_pte(pmap, sva)); 4591 obits = pbits = be64toh(*pte); 4592 if ((pbits & PG_V) == 0) 4593 continue; 4594 4595 if ((prot & VM_PROT_WRITE) == 0) { 4596 if ((pbits & (PG_MANAGED | PG_M | PG_RW)) == 4597 (PG_MANAGED | PG_M | PG_RW)) { 4598 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME); 4599 vm_page_dirty(m); 4600 } 4601 pbits &= ~(PG_RW | PG_M); 4602 pbits |= RPTE_EAA_R; 4603 } 4604 if (prot & VM_PROT_EXECUTE) 4605 pbits |= PG_X; 4606 4607 if (pbits != obits) { 4608 if (!atomic_cmpset_long(pte, htobe64(obits), htobe64(pbits))) 4609 goto retry; 4610 if (obits & (PG_A|PG_M)) { 4611 anychanged = TRUE; 4612 #ifdef INVARIANTS 4613 if (VERBOSE_PROTECT || pmap_logging) 4614 printf("%#lx %#lx -> %#lx\n", 4615 sva, obits, pbits); 4616 #endif 4617 } 4618 } 4619 } 4620 } 4621 if (anychanged) 4622 pmap_invalidate_all(pmap); 4623 PMAP_UNLOCK(pmap); 4624 } 4625 4626 void 4627 mmu_radix_qenter(vm_offset_t sva, vm_page_t *ma, int count) 4628 { 4629 4630 CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, sva, ma, count); 4631 pt_entry_t oldpte, pa, *pte; 4632 vm_page_t m; 4633 uint64_t cache_bits, attr_bits; 4634 vm_offset_t va; 4635 4636 oldpte = 0; 4637 attr_bits = RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_P | PG_M | PG_A; 4638 va = sva; 4639 pte = kvtopte(va); 4640 while (va < sva + PAGE_SIZE * count) { 4641 if (__predict_false((va & L3_PAGE_MASK) == 0)) 4642 pte = kvtopte(va); 4643 MPASS(pte == pmap_pte(kernel_pmap, va)); 4644 4645 /* 4646 * XXX there has to be a more efficient way than traversing 4647 * the page table every time - but go for correctness for 4648 * today 4649 */ 4650 4651 m = *ma++; 4652 cache_bits = pmap_cache_bits(m->md.mdpg_cache_attrs); 4653 pa = VM_PAGE_TO_PHYS(m) | cache_bits | attr_bits; 4654 if (be64toh(*pte) != pa) { 4655 oldpte |= be64toh(*pte); 4656 pte_store(pte, pa); 4657 } 4658 va += PAGE_SIZE; 4659 pte++; 4660 } 4661 if (__predict_false((oldpte & RPTE_VALID) != 0)) 4662 pmap_invalidate_range(kernel_pmap, sva, sva + count * 4663 PAGE_SIZE); 4664 else 4665 ptesync(); 4666 } 4667 4668 void 4669 mmu_radix_qremove(vm_offset_t sva, int count) 4670 { 4671 vm_offset_t va; 4672 pt_entry_t *pte; 4673 4674 CTR3(KTR_PMAP, "%s(%#x, %d)", __func__, sva, count); 4675 KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode or dmap va %lx", sva)); 4676 4677 va = sva; 4678 pte = kvtopte(va); 4679 while (va < sva + PAGE_SIZE * count) { 4680 if (__predict_false((va & L3_PAGE_MASK) == 0)) 4681 pte = kvtopte(va); 4682 pte_clear(pte); 4683 pte++; 4684 va += PAGE_SIZE; 4685 } 4686 pmap_invalidate_range(kernel_pmap, sva, va); 4687 } 4688 4689 /*************************************************** 4690 * Page table page management routines..... 4691 ***************************************************/ 4692 /* 4693 * Schedule the specified unused page table page to be freed. 
Specifically, 4694 * add the page to the specified list of pages that will be released to the 4695 * physical memory manager after the TLB has been updated. 4696 */ 4697 static __inline void 4698 pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, 4699 boolean_t set_PG_ZERO) 4700 { 4701 4702 if (set_PG_ZERO) 4703 m->flags |= PG_ZERO; 4704 else 4705 m->flags &= ~PG_ZERO; 4706 SLIST_INSERT_HEAD(free, m, plinks.s.ss); 4707 } 4708 4709 /* 4710 * Inserts the specified page table page into the specified pmap's collection 4711 * of idle page table pages. Each of a pmap's page table pages is responsible 4712 * for mapping a distinct range of virtual addresses. The pmap's collection is 4713 * ordered by this virtual address range. 4714 */ 4715 static __inline int 4716 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte) 4717 { 4718 4719 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4720 return (vm_radix_insert(&pmap->pm_radix, mpte)); 4721 } 4722 4723 /* 4724 * Removes the page table page mapping the specified virtual address from the 4725 * specified pmap's collection of idle page table pages, and returns it. 4726 * Otherwise, returns NULL if there is no page table page corresponding to the 4727 * specified virtual address. 4728 */ 4729 static __inline vm_page_t 4730 pmap_remove_pt_page(pmap_t pmap, vm_offset_t va) 4731 { 4732 4733 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4734 return (vm_radix_remove(&pmap->pm_radix, pmap_l3e_pindex(va))); 4735 } 4736 4737 /* 4738 * Decrements a page table page's wire count, which is used to record the 4739 * number of valid page table entries within the page. If the wire count 4740 * drops to zero, then the page table page is unmapped. Returns TRUE if the 4741 * page table page was unmapped and FALSE otherwise. 4742 */ 4743 static inline boolean_t 4744 pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) 4745 { 4746 4747 --m->ref_count; 4748 if (m->ref_count == 0) { 4749 _pmap_unwire_ptp(pmap, va, m, free); 4750 return (TRUE); 4751 } else 4752 return (FALSE); 4753 } 4754 4755 static void 4756 _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) 4757 { 4758 4759 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4760 /* 4761 * unmap the page table page 4762 */ 4763 if (m->pindex >= NUPDE + NUPDPE) { 4764 /* PDP page */ 4765 pml1_entry_t *pml1; 4766 pml1 = pmap_pml1e(pmap, va); 4767 *pml1 = 0; 4768 } else if (m->pindex >= NUPDE) { 4769 /* PD page */ 4770 pml2_entry_t *l2e; 4771 l2e = pmap_pml2e(pmap, va); 4772 *l2e = 0; 4773 } else { 4774 /* PTE page */ 4775 pml3_entry_t *l3e; 4776 l3e = pmap_pml3e(pmap, va); 4777 *l3e = 0; 4778 } 4779 pmap_resident_count_dec(pmap, 1); 4780 if (m->pindex < NUPDE) { 4781 /* We just released a PT, unhold the matching PD */ 4782 vm_page_t pdpg; 4783 4784 pdpg = PHYS_TO_VM_PAGE(be64toh(*pmap_pml2e(pmap, va)) & PG_FRAME); 4785 pmap_unwire_ptp(pmap, va, pdpg, free); 4786 } 4787 else if (m->pindex >= NUPDE && m->pindex < (NUPDE + NUPDPE)) { 4788 /* We just released a PD, unhold the matching PDP */ 4789 vm_page_t pdppg; 4790 4791 pdppg = PHYS_TO_VM_PAGE(be64toh(*pmap_pml1e(pmap, va)) & PG_FRAME); 4792 pmap_unwire_ptp(pmap, va, pdppg, free); 4793 } 4794 4795 /* 4796 * Put page on a list so that it is released after 4797 * *ALL* TLB shootdown is done 4798 */ 4799 pmap_add_delayed_free_list(m, free, TRUE); 4800 } 4801 4802 /* 4803 * After removing a page table entry, this routine is used to 4804 * conditionally free the page, and manage the hold/wire counts. 
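 * Kernel mappings are never freed this way; only page table pages backing user addresses (below VM_MAXUSER_ADDRESS) are unwired here.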
4805 */ 4806 static int 4807 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pml3_entry_t ptepde, 4808 struct spglist *free) 4809 { 4810 vm_page_t mpte; 4811 4812 if (va >= VM_MAXUSER_ADDRESS) 4813 return (0); 4814 KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0")); 4815 mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME); 4816 return (pmap_unwire_ptp(pmap, va, mpte, free)); 4817 } 4818 4819 void 4820 mmu_radix_release(pmap_t pmap) 4821 { 4822 4823 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap); 4824 KASSERT(pmap->pm_stats.resident_count == 0, 4825 ("pmap_release: pmap resident count %ld != 0", 4826 pmap->pm_stats.resident_count)); 4827 KASSERT(vm_radix_is_empty(&pmap->pm_radix), 4828 ("pmap_release: pmap has reserved page table page(s)")); 4829 4830 pmap_invalidate_all(pmap); 4831 isa3_proctab[pmap->pm_pid].proctab0 = 0; 4832 uma_zfree(zone_radix_pgd, pmap->pm_pml1); 4833 vmem_free(asid_arena, pmap->pm_pid, 1); 4834 } 4835 4836 /* 4837 * Create the PV entry for a 2MB page mapping. Always returns true unless the 4838 * flag PMAP_ENTER_NORECLAIM is specified. If that flag is specified, returns 4839 * false if the PV entry cannot be allocated without resorting to reclamation. 4840 */ 4841 static bool 4842 pmap_pv_insert_l3e(pmap_t pmap, vm_offset_t va, pml3_entry_t pde, u_int flags, 4843 struct rwlock **lockp) 4844 { 4845 struct md_page *pvh; 4846 pv_entry_t pv; 4847 vm_paddr_t pa; 4848 4849 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4850 /* Pass NULL instead of the lock pointer to disable reclamation. */ 4851 if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ? 4852 NULL : lockp)) == NULL) 4853 return (false); 4854 pv->pv_va = va; 4855 pa = pde & PG_PS_FRAME; 4856 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa); 4857 pvh = pa_to_pvh(pa); 4858 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_link); 4859 pvh->pv_gen++; 4860 return (true); 4861 } 4862 4863 /* 4864 * Fills a page table page with mappings to consecutive physical pages. 4865 */ 4866 static void 4867 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte) 4868 { 4869 pt_entry_t *pte; 4870 4871 for (pte = firstpte; pte < firstpte + NPTEPG; pte++) { 4872 *pte = htobe64(newpte); 4873 newpte += PAGE_SIZE; 4874 } 4875 } 4876 4877 static boolean_t 4878 pmap_demote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va) 4879 { 4880 struct rwlock *lock; 4881 boolean_t rv; 4882 4883 lock = NULL; 4884 rv = pmap_demote_l3e_locked(pmap, pde, va, &lock); 4885 if (lock != NULL) 4886 rw_wunlock(lock); 4887 return (rv); 4888 } 4889 4890 static boolean_t 4891 pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va, 4892 struct rwlock **lockp) 4893 { 4894 pml3_entry_t oldpde; 4895 pt_entry_t *firstpte; 4896 vm_paddr_t mptepa; 4897 vm_page_t mpte; 4898 struct spglist free; 4899 vm_offset_t sva; 4900 4901 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4902 oldpde = be64toh(*l3e); 4903 KASSERT((oldpde & (RPTE_LEAF | PG_V)) == (RPTE_LEAF | PG_V), 4904 ("pmap_demote_l3e: oldpde is missing RPTE_LEAF and/or PG_V %lx", 4905 oldpde)); 4906 if ((oldpde & PG_A) == 0 || (mpte = pmap_remove_pt_page(pmap, va)) == 4907 NULL) { 4908 KASSERT((oldpde & PG_W) == 0, 4909 ("pmap_demote_l3e: page table page for a wired mapping" 4910 " is missing")); 4911 4912 /* 4913 * Invalidate the 2MB page mapping and return "failure" if the 4914 * mapping was never accessed or the allocation of the new 4915 * page table page fails. 
If the 2MB page mapping belongs to 4916 * the direct map region of the kernel's address space, then 4917 * the page allocation request specifies the highest possible 4918 * priority (VM_ALLOC_INTERRUPT). Otherwise, the priority is 4919 * normal. Page table pages are preallocated for every other 4920 * part of the kernel address space, so the direct map region 4921 * is the only part of the kernel address space that must be 4922 * handled here. 4923 */ 4924 if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL, 4925 pmap_l3e_pindex(va), (va >= DMAP_MIN_ADDRESS && va < 4926 DMAP_MAX_ADDRESS ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) | 4927 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 4928 SLIST_INIT(&free); 4929 sva = trunc_2mpage(va); 4930 pmap_remove_l3e(pmap, l3e, sva, &free, lockp); 4931 pmap_invalidate_l3e_page(pmap, sva, oldpde); 4932 vm_page_free_pages_toq(&free, true); 4933 CTR2(KTR_PMAP, "pmap_demote_l3e: failure for va %#lx" 4934 " in pmap %p", va, pmap); 4935 return (FALSE); 4936 } 4937 if (va < VM_MAXUSER_ADDRESS) 4938 pmap_resident_count_inc(pmap, 1); 4939 } 4940 mptepa = VM_PAGE_TO_PHYS(mpte); 4941 firstpte = (pt_entry_t *)PHYS_TO_DMAP(mptepa); 4942 KASSERT((oldpde & PG_A) != 0, 4943 ("pmap_demote_l3e: oldpde is missing PG_A")); 4944 KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW, 4945 ("pmap_demote_l3e: oldpde is missing PG_M")); 4946 4947 /* 4948 * If the page table page is new, initialize it. 4949 */ 4950 if (mpte->ref_count == 1) { 4951 mpte->ref_count = NPTEPG; 4952 pmap_fill_ptp(firstpte, oldpde); 4953 } 4954 4955 KASSERT((be64toh(*firstpte) & PG_FRAME) == (oldpde & PG_FRAME), 4956 ("pmap_demote_l3e: firstpte and newpte map different physical" 4957 " addresses")); 4958 4959 /* 4960 * If the mapping has changed attributes, update the page table 4961 * entries. 4962 */ 4963 if ((be64toh(*firstpte) & PG_PTE_PROMOTE) != (oldpde & PG_PTE_PROMOTE)) 4964 pmap_fill_ptp(firstpte, oldpde); 4965 4966 /* 4967 * The spare PV entries must be reserved prior to demoting the 4968 * mapping, that is, prior to changing the PDE. Otherwise, the state 4969 * of the PDE and the PV lists will be inconsistent, which can result 4970 * in reclaim_pv_chunk() attempting to remove a PV entry from the 4971 * wrong PV list and pmap_pv_demote_l3e() failing to find the expected 4972 * PV entry for the 2MB page mapping that is being demoted. 4973 */ 4974 if ((oldpde & PG_MANAGED) != 0) 4975 reserve_pv_entries(pmap, NPTEPG - 1, lockp); 4976 4977 /* 4978 * Demote the mapping. This pmap is locked. The old PDE has 4979 * PG_A set. If the old PDE has PG_RW set, it also has PG_M 4980 * set. Thus, there is no danger of a race with another 4981 * processor changing the setting of PG_A and/or PG_M between 4982 * the read above and the store below. 4983 */ 4984 pde_store(l3e, mptepa); 4985 pmap_invalidate_l3e_page(pmap, trunc_2mpage(va), oldpde); 4986 /* 4987 * Demote the PV entry. 4988 */ 4989 if ((oldpde & PG_MANAGED) != 0) 4990 pmap_pv_demote_l3e(pmap, va, oldpde & PG_PS_FRAME, lockp); 4991 4992 counter_u64_add(pmap_l3e_demotions, 1); 4993 CTR2(KTR_PMAP, "pmap_demote_l3e: success for va %#lx" 4994 " in pmap %p", va, pmap); 4995 return (TRUE); 4996 } 4997 4998 /* 4999 * pmap_remove_kernel_pde: Remove a kernel superpage mapping. 
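 * Rather than clearing the L3 entry, it is re-pointed at a zeroed page table page so that the kernel page table structure remains fully populated.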
5000 */ 5001 static void 5002 pmap_remove_kernel_l3e(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va) 5003 { 5004 vm_paddr_t mptepa; 5005 vm_page_t mpte; 5006 5007 KASSERT(pmap == kernel_pmap, ("pmap %p is not kernel_pmap", pmap)); 5008 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 5009 mpte = pmap_remove_pt_page(pmap, va); 5010 if (mpte == NULL) 5011 panic("pmap_remove_kernel_pde: Missing pt page."); 5012 5013 mptepa = VM_PAGE_TO_PHYS(mpte); 5014 5015 /* 5016 * Initialize the page table page. 5017 */ 5018 pagezero(PHYS_TO_DMAP(mptepa)); 5019 5020 /* 5021 * Demote the mapping. 5022 */ 5023 pde_store(l3e, mptepa); 5024 ptesync(); 5025 } 5026 5027 /* 5028 * pmap_remove_l3e: do the things to unmap a superpage in a process 5029 */ 5030 static int 5031 pmap_remove_l3e(pmap_t pmap, pml3_entry_t *pdq, vm_offset_t sva, 5032 struct spglist *free, struct rwlock **lockp) 5033 { 5034 struct md_page *pvh; 5035 pml3_entry_t oldpde; 5036 vm_offset_t eva, va; 5037 vm_page_t m, mpte; 5038 5039 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 5040 KASSERT((sva & L3_PAGE_MASK) == 0, 5041 ("pmap_remove_l3e: sva is not 2mpage aligned")); 5042 oldpde = be64toh(pte_load_clear(pdq)); 5043 if (oldpde & PG_W) 5044 pmap->pm_stats.wired_count -= (L3_PAGE_SIZE / PAGE_SIZE); 5045 pmap_resident_count_dec(pmap, L3_PAGE_SIZE / PAGE_SIZE); 5046 if (oldpde & PG_MANAGED) { 5047 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, oldpde & PG_PS_FRAME); 5048 pvh = pa_to_pvh(oldpde & PG_PS_FRAME); 5049 pmap_pvh_free(pvh, pmap, sva); 5050 eva = sva + L3_PAGE_SIZE; 5051 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME); 5052 va < eva; va += PAGE_SIZE, m++) { 5053 if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) 5054 vm_page_dirty(m); 5055 if (oldpde & PG_A) 5056 vm_page_aflag_set(m, PGA_REFERENCED); 5057 if (TAILQ_EMPTY(&m->md.pv_list) && 5058 TAILQ_EMPTY(&pvh->pv_list)) 5059 vm_page_aflag_clear(m, PGA_WRITEABLE); 5060 } 5061 } 5062 if (pmap == kernel_pmap) { 5063 pmap_remove_kernel_l3e(pmap, pdq, sva); 5064 } else { 5065 mpte = pmap_remove_pt_page(pmap, sva); 5066 if (mpte != NULL) { 5067 pmap_resident_count_dec(pmap, 1); 5068 KASSERT(mpte->ref_count == NPTEPG, 5069 ("pmap_remove_l3e: pte page wire count error")); 5070 mpte->ref_count = 0; 5071 pmap_add_delayed_free_list(mpte, free, FALSE); 5072 } 5073 } 5074 return (pmap_unuse_pt(pmap, sva, be64toh(*pmap_pml2e(pmap, sva)), free)); 5075 } 5076 5077 /* 5078 * pmap_remove_pte: do the things to unmap a page in a process 5079 */ 5080 static int 5081 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, 5082 pml3_entry_t ptepde, struct spglist *free, struct rwlock **lockp) 5083 { 5084 struct md_page *pvh; 5085 pt_entry_t oldpte; 5086 vm_page_t m; 5087 5088 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 5089 oldpte = be64toh(pte_load_clear(ptq)); 5090 if (oldpte & RPTE_WIRED) 5091 pmap->pm_stats.wired_count -= 1; 5092 pmap_resident_count_dec(pmap, 1); 5093 if (oldpte & RPTE_MANAGED) { 5094 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME); 5095 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 5096 vm_page_dirty(m); 5097 if (oldpte & PG_A) 5098 vm_page_aflag_set(m, PGA_REFERENCED); 5099 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); 5100 pmap_pvh_free(&m->md, pmap, va); 5101 if (TAILQ_EMPTY(&m->md.pv_list) && 5102 (m->flags & PG_FICTITIOUS) == 0) { 5103 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5104 if (TAILQ_EMPTY(&pvh->pv_list)) 5105 vm_page_aflag_clear(m, PGA_WRITEABLE); 5106 } 5107 } 5108 return (pmap_unuse_pt(pmap, va, ptepde, free)); 5109 } 5110 5111 /* 5112 * Remove a single page from a process address space 5113 */ 5114 static bool 5115 
pmap_remove_page(pmap_t pmap, vm_offset_t va, pml3_entry_t *l3e, 5116 struct spglist *free) 5117 { 5118 struct rwlock *lock; 5119 pt_entry_t *pte; 5120 bool invalidate_all; 5121 5122 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 5123 if ((be64toh(*l3e) & RPTE_VALID) == 0) { 5124 return (false); 5125 } 5126 pte = pmap_l3e_to_pte(l3e, va); 5127 if ((be64toh(*pte) & RPTE_VALID) == 0) { 5128 return (false); 5129 } 5130 lock = NULL; 5131 5132 invalidate_all = pmap_remove_pte(pmap, pte, va, be64toh(*l3e), free, &lock); 5133 if (lock != NULL) 5134 rw_wunlock(lock); 5135 if (!invalidate_all) 5136 pmap_invalidate_page(pmap, va); 5137 return (invalidate_all); 5138 } 5139 5140 /* 5141 * Removes the specified range of addresses from the page table page. 5142 */ 5143 static bool 5144 pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 5145 pml3_entry_t *l3e, struct spglist *free, struct rwlock **lockp) 5146 { 5147 pt_entry_t *pte; 5148 vm_offset_t va; 5149 bool anyvalid; 5150 5151 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 5152 anyvalid = false; 5153 va = eva; 5154 for (pte = pmap_l3e_to_pte(l3e, sva); sva != eva; pte++, 5155 sva += PAGE_SIZE) { 5156 MPASS(pte == pmap_pte(pmap, sva)); 5157 if (*pte == 0) { 5158 if (va != eva) { 5159 anyvalid = true; 5160 va = eva; 5161 } 5162 continue; 5163 } 5164 if (va == eva) 5165 va = sva; 5166 if (pmap_remove_pte(pmap, pte, sva, be64toh(*l3e), free, lockp)) { 5167 anyvalid = true; 5168 sva += PAGE_SIZE; 5169 break; 5170 } 5171 } 5172 if (anyvalid) 5173 pmap_invalidate_all(pmap); 5174 else if (va != eva) 5175 pmap_invalidate_range(pmap, va, sva); 5176 return (anyvalid); 5177 } 5178 5179 void 5180 mmu_radix_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 5181 { 5182 struct rwlock *lock; 5183 vm_offset_t va_next; 5184 pml1_entry_t *l1e; 5185 pml2_entry_t *l2e; 5186 pml3_entry_t ptpaddr, *l3e; 5187 struct spglist free; 5188 bool anyvalid; 5189 5190 CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, sva, eva); 5191 5192 /* 5193 * Perform an unsynchronized read. This is, however, safe. 5194 */ 5195 if (pmap->pm_stats.resident_count == 0) 5196 return; 5197 5198 anyvalid = false; 5199 SLIST_INIT(&free); 5200 5201 /* XXX something fishy here */ 5202 sva = (sva + PAGE_MASK) & ~PAGE_MASK; 5203 eva = (eva + PAGE_MASK) & ~PAGE_MASK; 5204 5205 PMAP_LOCK(pmap); 5206 5207 /* 5208 * special handling of removing one page. a very 5209 * common operation and easy to short circuit some 5210 * code. 5211 */ 5212 if (sva + PAGE_SIZE == eva) { 5213 l3e = pmap_pml3e(pmap, sva); 5214 if (l3e && (be64toh(*l3e) & RPTE_LEAF) == 0) { 5215 anyvalid = pmap_remove_page(pmap, sva, l3e, &free); 5216 goto out; 5217 } 5218 } 5219 5220 lock = NULL; 5221 for (; sva < eva; sva = va_next) { 5222 if (pmap->pm_stats.resident_count == 0) 5223 break; 5224 l1e = pmap_pml1e(pmap, sva); 5225 if (l1e == NULL || (be64toh(*l1e) & PG_V) == 0) { 5226 va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK; 5227 if (va_next < sva) 5228 va_next = eva; 5229 continue; 5230 } 5231 5232 l2e = pmap_l1e_to_l2e(l1e, sva); 5233 if (l2e == NULL || (be64toh(*l2e) & PG_V) == 0) { 5234 va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK; 5235 if (va_next < sva) 5236 va_next = eva; 5237 continue; 5238 } 5239 5240 /* 5241 * Calculate index for next page table. 5242 */ 5243 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK; 5244 if (va_next < sva) 5245 va_next = eva; 5246 5247 l3e = pmap_l2e_to_l3e(l2e, sva); 5248 ptpaddr = be64toh(*l3e); 5249 5250 /* 5251 * Weed out invalid mappings. 
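 * A zero L3 entry maps nothing within this 2MB range.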
5252 */ 5253 if (ptpaddr == 0) 5254 continue; 5255 5256 /* 5257 * Check for large page. 5258 */ 5259 if ((ptpaddr & RPTE_LEAF) != 0) { 5260 /* 5261 * Are we removing the entire large page? If not, 5262 * demote the mapping and fall through. 5263 */ 5264 if (sva + L3_PAGE_SIZE == va_next && eva >= va_next) { 5265 pmap_remove_l3e(pmap, l3e, sva, &free, &lock); 5266 anyvalid = true; 5267 continue; 5268 } else if (!pmap_demote_l3e_locked(pmap, l3e, sva, 5269 &lock)) { 5270 /* The large page mapping was destroyed. */ 5271 continue; 5272 } else 5273 ptpaddr = be64toh(*l3e); 5274 } 5275 5276 /* 5277 * Limit our scan to either the end of the va represented 5278 * by the current page table page, or to the end of the 5279 * range being removed. 5280 */ 5281 if (va_next > eva) 5282 va_next = eva; 5283 5284 if (pmap_remove_ptes(pmap, sva, va_next, l3e, &free, &lock)) 5285 anyvalid = true; 5286 } 5287 if (lock != NULL) 5288 rw_wunlock(lock); 5289 out: 5290 if (anyvalid) 5291 pmap_invalidate_all(pmap); 5292 PMAP_UNLOCK(pmap); 5293 vm_page_free_pages_toq(&free, true); 5294 } 5295 5296 void 5297 mmu_radix_remove_all(vm_page_t m) 5298 { 5299 struct md_page *pvh; 5300 pv_entry_t pv; 5301 pmap_t pmap; 5302 struct rwlock *lock; 5303 pt_entry_t *pte, tpte; 5304 pml3_entry_t *l3e; 5305 vm_offset_t va; 5306 struct spglist free; 5307 int pvh_gen, md_gen; 5308 5309 CTR2(KTR_PMAP, "%s(%p)", __func__, m); 5310 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5311 ("pmap_remove_all: page %p is not managed", m)); 5312 SLIST_INIT(&free); 5313 lock = VM_PAGE_TO_PV_LIST_LOCK(m); 5314 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : 5315 pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5316 retry: 5317 rw_wlock(lock); 5318 while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) { 5319 pmap = PV_PMAP(pv); 5320 if (!PMAP_TRYLOCK(pmap)) { 5321 pvh_gen = pvh->pv_gen; 5322 rw_wunlock(lock); 5323 PMAP_LOCK(pmap); 5324 rw_wlock(lock); 5325 if (pvh_gen != pvh->pv_gen) { 5326 rw_wunlock(lock); 5327 PMAP_UNLOCK(pmap); 5328 goto retry; 5329 } 5330 } 5331 va = pv->pv_va; 5332 l3e = pmap_pml3e(pmap, va); 5333 (void)pmap_demote_l3e_locked(pmap, l3e, va, &lock); 5334 PMAP_UNLOCK(pmap); 5335 } 5336 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 5337 pmap = PV_PMAP(pv); 5338 if (!PMAP_TRYLOCK(pmap)) { 5339 pvh_gen = pvh->pv_gen; 5340 md_gen = m->md.pv_gen; 5341 rw_wunlock(lock); 5342 PMAP_LOCK(pmap); 5343 rw_wlock(lock); 5344 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) { 5345 rw_wunlock(lock); 5346 PMAP_UNLOCK(pmap); 5347 goto retry; 5348 } 5349 } 5350 pmap_resident_count_dec(pmap, 1); 5351 l3e = pmap_pml3e(pmap, pv->pv_va); 5352 KASSERT((be64toh(*l3e) & RPTE_LEAF) == 0, ("pmap_remove_all: found" 5353 " a 2mpage in page %p's pv list", m)); 5354 pte = pmap_l3e_to_pte(l3e, pv->pv_va); 5355 tpte = be64toh(pte_load_clear(pte)); 5356 if (tpte & PG_W) 5357 pmap->pm_stats.wired_count--; 5358 if (tpte & PG_A) 5359 vm_page_aflag_set(m, PGA_REFERENCED); 5360 5361 /* 5362 * Update the vm_page_t clean and reference bits. 5363 */ 5364 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 5365 vm_page_dirty(m); 5366 pmap_unuse_pt(pmap, pv->pv_va, be64toh(*l3e), &free); 5367 pmap_invalidate_page(pmap, pv->pv_va); 5368 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link); 5369 m->md.pv_gen++; 5370 free_pv_entry(pmap, pv); 5371 PMAP_UNLOCK(pmap); 5372 } 5373 vm_page_aflag_clear(m, PGA_WRITEABLE); 5374 rw_wunlock(lock); 5375 vm_page_free_pages_toq(&free, true); 5376 } 5377 5378 /* 5379 * Destroy all managed, non-wired mappings in the given user-space 5380 * pmap. 
This pmap cannot be active on any processor besides the 5381 * caller. 5382 * 5383 * This function cannot be applied to the kernel pmap. Moreover, it 5384 * is not intended for general use. It is only to be used during 5385 * process termination. Consequently, it can be implemented in ways 5386 * that make it faster than pmap_remove(). First, it can more quickly 5387 * destroy mappings by iterating over the pmap's collection of PV 5388 * entries, rather than searching the page table. Second, it doesn't 5389 * have to test and clear the page table entries atomically, because 5390 * no processor is currently accessing the user address space. In 5391 * particular, a page table entry's dirty bit won't change state once 5392 * this function starts. 5393 * 5394 * Although this function destroys all of the pmap's managed, 5395 * non-wired mappings, it can delay and batch the invalidation of TLB 5396 * entries without calling pmap_delayed_invl_started() and 5397 * pmap_delayed_invl_finished(). Because the pmap is not active on 5398 * any other processor, none of these TLB entries will ever be used 5399 * before their eventual invalidation. Consequently, there is no need 5400 * for either pmap_remove_all() or pmap_remove_write() to wait for 5401 * that eventual TLB invalidation. 5402 */ 5403 5404 void 5405 mmu_radix_remove_pages(pmap_t pmap) 5406 { 5407 5408 CTR2(KTR_PMAP, "%s(%p)", __func__, pmap); 5409 pml3_entry_t ptel3e; 5410 pt_entry_t *pte, tpte; 5411 struct spglist free; 5412 vm_page_t m, mpte, mt; 5413 pv_entry_t pv; 5414 struct md_page *pvh; 5415 struct pv_chunk *pc, *npc; 5416 struct rwlock *lock; 5417 int64_t bit; 5418 uint64_t inuse, bitmask; 5419 int allfree, field, freed, idx; 5420 boolean_t superpage; 5421 vm_paddr_t pa; 5422 5423 /* 5424 * Assert that the given pmap is only active on the current 5425 * CPU. Unfortunately, we cannot block another CPU from 5426 * activating the pmap while this function is executing. 5427 */ 5428 KASSERT(pmap->pm_pid == mfspr(SPR_PID), 5429 ("non-current asid %lu - expected %lu", pmap->pm_pid, 5430 mfspr(SPR_PID))); 5431 5432 lock = NULL; 5433 5434 SLIST_INIT(&free); 5435 PMAP_LOCK(pmap); 5436 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { 5437 allfree = 1; 5438 freed = 0; 5439 for (field = 0; field < _NPCM; field++) { 5440 inuse = ~pc->pc_map[field] & pc_freemask[field]; 5441 while (inuse != 0) { 5442 bit = cnttzd(inuse); 5443 bitmask = 1UL << bit; 5444 idx = field * 64 + bit; 5445 pv = &pc->pc_pventry[idx]; 5446 inuse &= ~bitmask; 5447 5448 pte = pmap_pml2e(pmap, pv->pv_va); 5449 ptel3e = be64toh(*pte); 5450 pte = pmap_l2e_to_l3e(pte, pv->pv_va); 5451 tpte = be64toh(*pte); 5452 if ((tpte & (RPTE_LEAF | PG_V)) == PG_V) { 5453 superpage = FALSE; 5454 ptel3e = tpte; 5455 pte = (pt_entry_t *)PHYS_TO_DMAP(tpte & 5456 PG_FRAME); 5457 pte = &pte[pmap_pte_index(pv->pv_va)]; 5458 tpte = be64toh(*pte); 5459 } else { 5460 /* 5461 * Keep track whether 'tpte' is a 5462 * superpage explicitly instead of 5463 * relying on RPTE_LEAF being set. 5464 * 5465 * This is because RPTE_LEAF is numerically 5466 * identical to PG_PTE_PAT and thus a 5467 * regular page could be mistaken for 5468 * a superpage. 
5469 */ 5470 superpage = TRUE; 5471 } 5472 5473 if ((tpte & PG_V) == 0) { 5474 panic("bad pte va %lx pte %lx", 5475 pv->pv_va, tpte); 5476 } 5477 5478 /* 5479 * We cannot remove wired pages from a process' mapping at this time 5480 */ 5481 if (tpte & PG_W) { 5482 allfree = 0; 5483 continue; 5484 } 5485 5486 if (superpage) 5487 pa = tpte & PG_PS_FRAME; 5488 else 5489 pa = tpte & PG_FRAME; 5490 5491 m = PHYS_TO_VM_PAGE(pa); 5492 KASSERT(m->phys_addr == pa, 5493 ("vm_page_t %p phys_addr mismatch %016jx %016jx", 5494 m, (uintmax_t)m->phys_addr, 5495 (uintmax_t)tpte)); 5496 5497 KASSERT((m->flags & PG_FICTITIOUS) != 0 || 5498 m < &vm_page_array[vm_page_array_size], 5499 ("pmap_remove_pages: bad tpte %#jx", 5500 (uintmax_t)tpte)); 5501 5502 pte_clear(pte); 5503 5504 /* 5505 * Update the vm_page_t clean/reference bits. 5506 */ 5507 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) { 5508 if (superpage) { 5509 for (mt = m; mt < &m[L3_PAGE_SIZE / PAGE_SIZE]; mt++) 5510 vm_page_dirty(mt); 5511 } else 5512 vm_page_dirty(m); 5513 } 5514 5515 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m); 5516 5517 /* Mark free */ 5518 pc->pc_map[field] |= bitmask; 5519 if (superpage) { 5520 pmap_resident_count_dec(pmap, L3_PAGE_SIZE / PAGE_SIZE); 5521 pvh = pa_to_pvh(tpte & PG_PS_FRAME); 5522 TAILQ_REMOVE(&pvh->pv_list, pv, pv_link); 5523 pvh->pv_gen++; 5524 if (TAILQ_EMPTY(&pvh->pv_list)) { 5525 for (mt = m; mt < &m[L3_PAGE_SIZE / PAGE_SIZE]; mt++) 5526 if ((mt->a.flags & PGA_WRITEABLE) != 0 && 5527 TAILQ_EMPTY(&mt->md.pv_list)) 5528 vm_page_aflag_clear(mt, PGA_WRITEABLE); 5529 } 5530 mpte = pmap_remove_pt_page(pmap, pv->pv_va); 5531 if (mpte != NULL) { 5532 pmap_resident_count_dec(pmap, 1); 5533 KASSERT(mpte->ref_count == NPTEPG, 5534 ("pmap_remove_pages: pte page wire count error")); 5535 mpte->ref_count = 0; 5536 pmap_add_delayed_free_list(mpte, &free, FALSE); 5537 } 5538 } else { 5539 pmap_resident_count_dec(pmap, 1); 5540 #ifdef VERBOSE_PV 5541 printf("freeing pv (%p, %p)\n", 5542 pmap, pv); 5543 #endif 5544 TAILQ_REMOVE(&m->md.pv_list, pv, pv_link); 5545 m->md.pv_gen++; 5546 if ((m->a.flags & PGA_WRITEABLE) != 0 && 5547 TAILQ_EMPTY(&m->md.pv_list) && 5548 (m->flags & PG_FICTITIOUS) == 0) { 5549 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5550 if (TAILQ_EMPTY(&pvh->pv_list)) 5551 vm_page_aflag_clear(m, PGA_WRITEABLE); 5552 } 5553 } 5554 pmap_unuse_pt(pmap, pv->pv_va, ptel3e, &free); 5555 freed++; 5556 } 5557 } 5558 PV_STAT(atomic_add_long(&pv_entry_frees, freed)); 5559 PV_STAT(atomic_add_int(&pv_entry_spare, freed)); 5560 PV_STAT(atomic_subtract_long(&pv_entry_count, freed)); 5561 if (allfree) { 5562 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 5563 free_pv_chunk(pc); 5564 } 5565 } 5566 if (lock != NULL) 5567 rw_wunlock(lock); 5568 pmap_invalidate_all(pmap); 5569 PMAP_UNLOCK(pmap); 5570 vm_page_free_pages_toq(&free, true); 5571 } 5572 5573 void 5574 mmu_radix_remove_write(vm_page_t m) 5575 { 5576 struct md_page *pvh; 5577 pmap_t pmap; 5578 struct rwlock *lock; 5579 pv_entry_t next_pv, pv; 5580 pml3_entry_t *l3e; 5581 pt_entry_t oldpte, *pte; 5582 int pvh_gen, md_gen; 5583 5584 CTR2(KTR_PMAP, "%s(%p)", __func__, m); 5585 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5586 ("pmap_remove_write: page %p is not managed", m)); 5587 vm_page_assert_busied(m); 5588 5589 if (!pmap_page_is_write_mapped(m)) 5590 return; 5591 lock = VM_PAGE_TO_PV_LIST_LOCK(m); 5592 pvh = (m->flags & PG_FICTITIOUS) != 0 ? 
&pv_dummy : 5593 pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5594 retry_pv_loop: 5595 rw_wlock(lock); 5596 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_link, next_pv) { 5597 pmap = PV_PMAP(pv); 5598 if (!PMAP_TRYLOCK(pmap)) { 5599 pvh_gen = pvh->pv_gen; 5600 rw_wunlock(lock); 5601 PMAP_LOCK(pmap); 5602 rw_wlock(lock); 5603 if (pvh_gen != pvh->pv_gen) { 5604 PMAP_UNLOCK(pmap); 5605 rw_wunlock(lock); 5606 goto retry_pv_loop; 5607 } 5608 } 5609 l3e = pmap_pml3e(pmap, pv->pv_va); 5610 if ((be64toh(*l3e) & PG_RW) != 0) 5611 (void)pmap_demote_l3e_locked(pmap, l3e, pv->pv_va, &lock); 5612 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m), 5613 ("inconsistent pv lock %p %p for page %p", 5614 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m)); 5615 PMAP_UNLOCK(pmap); 5616 } 5617 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 5618 pmap = PV_PMAP(pv); 5619 if (!PMAP_TRYLOCK(pmap)) { 5620 pvh_gen = pvh->pv_gen; 5621 md_gen = m->md.pv_gen; 5622 rw_wunlock(lock); 5623 PMAP_LOCK(pmap); 5624 rw_wlock(lock); 5625 if (pvh_gen != pvh->pv_gen || 5626 md_gen != m->md.pv_gen) { 5627 PMAP_UNLOCK(pmap); 5628 rw_wunlock(lock); 5629 goto retry_pv_loop; 5630 } 5631 } 5632 l3e = pmap_pml3e(pmap, pv->pv_va); 5633 KASSERT((be64toh(*l3e) & RPTE_LEAF) == 0, 5634 ("pmap_remove_write: found a 2mpage in page %p's pv list", 5635 m)); 5636 pte = pmap_l3e_to_pte(l3e, pv->pv_va); 5637 retry: 5638 oldpte = be64toh(*pte); 5639 if (oldpte & PG_RW) { 5640 if (!atomic_cmpset_long(pte, htobe64(oldpte), 5641 htobe64((oldpte | RPTE_EAA_R) & ~(PG_RW | PG_M)))) 5642 goto retry; 5643 if ((oldpte & PG_M) != 0) 5644 vm_page_dirty(m); 5645 pmap_invalidate_page(pmap, pv->pv_va); 5646 } 5647 PMAP_UNLOCK(pmap); 5648 } 5649 rw_wunlock(lock); 5650 vm_page_aflag_clear(m, PGA_WRITEABLE); 5651 } 5652 5653 /* 5654 * Clear the wired attribute from the mappings for the specified range of 5655 * addresses in the given pmap. Every valid mapping within that range 5656 * must have the wired attribute set. In contrast, invalid mappings 5657 * cannot have the wired attribute set, so they are ignored. 5658 * 5659 * The wired attribute of the page table entry is not a hardware 5660 * feature, so there is no need to invalidate any TLB entries. 5661 * Since pmap_demote_l3e() for the wired entry must never fail, 5662 * pmap_delayed_invl_started()/finished() calls around the 5663 * function are not needed. 5664 */ 5665 void 5666 mmu_radix_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 5667 { 5668 vm_offset_t va_next; 5669 pml1_entry_t *l1e; 5670 pml2_entry_t *l2e; 5671 pml3_entry_t *l3e; 5672 pt_entry_t *pte; 5673 5674 CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, sva, eva); 5675 PMAP_LOCK(pmap); 5676 for (; sva < eva; sva = va_next) { 5677 l1e = pmap_pml1e(pmap, sva); 5678 if ((be64toh(*l1e) & PG_V) == 0) { 5679 va_next = (sva + L1_PAGE_SIZE) & ~L1_PAGE_MASK; 5680 if (va_next < sva) 5681 va_next = eva; 5682 continue; 5683 } 5684 l2e = pmap_l1e_to_l2e(l1e, sva); 5685 if ((be64toh(*l2e) & PG_V) == 0) { 5686 va_next = (sva + L2_PAGE_SIZE) & ~L2_PAGE_MASK; 5687 if (va_next < sva) 5688 va_next = eva; 5689 continue; 5690 } 5691 va_next = (sva + L3_PAGE_SIZE) & ~L3_PAGE_MASK; 5692 if (va_next < sva) 5693 va_next = eva; 5694 l3e = pmap_l2e_to_l3e(l2e, sva); 5695 if ((be64toh(*l3e) & PG_V) == 0) 5696 continue; 5697 if ((be64toh(*l3e) & RPTE_LEAF) != 0) { 5698 if ((be64toh(*l3e) & PG_W) == 0) 5699 panic("pmap_unwire: pde %#jx is missing PG_W", 5700 (uintmax_t)(be64toh(*l3e))); 5701 5702 /* 5703 * Are we unwiring the entire large page? If not, 5704 * demote the mapping and fall through. 
5705 */ 5706 if (sva + L3_PAGE_SIZE == va_next && eva >= va_next) { 5707 atomic_clear_long(l3e, htobe64(PG_W)); 5708 pmap->pm_stats.wired_count -= L3_PAGE_SIZE / 5709 PAGE_SIZE; 5710 continue; 5711 } else if (!pmap_demote_l3e(pmap, l3e, sva)) 5712 panic("pmap_unwire: demotion failed"); 5713 } 5714 if (va_next > eva) 5715 va_next = eva; 5716 for (pte = pmap_l3e_to_pte(l3e, sva); sva != va_next; pte++, 5717 sva += PAGE_SIZE) { 5718 MPASS(pte == pmap_pte(pmap, sva)); 5719 if ((be64toh(*pte) & PG_V) == 0) 5720 continue; 5721 if ((be64toh(*pte) & PG_W) == 0) 5722 panic("pmap_unwire: pte %#jx is missing PG_W", 5723 (uintmax_t)(be64toh(*pte))); 5724 5725 /* 5726 * PG_W must be cleared atomically. Although the pmap 5727 * lock synchronizes access to PG_W, another processor 5728 * could be setting PG_M and/or PG_A concurrently. 5729 */ 5730 atomic_clear_long(pte, htobe64(PG_W)); 5731 pmap->pm_stats.wired_count--; 5732 } 5733 } 5734 PMAP_UNLOCK(pmap); 5735 } 5736 5737 void 5738 mmu_radix_zero_page(vm_page_t m) 5739 { 5740 vm_offset_t addr; 5741 5742 CTR2(KTR_PMAP, "%s(%p)", __func__, m); 5743 addr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); 5744 pagezero(addr); 5745 } 5746 5747 void 5748 mmu_radix_zero_page_area(vm_page_t m, int off, int size) 5749 { 5750 caddr_t addr; 5751 5752 CTR4(KTR_PMAP, "%s(%p, %d, %d)", __func__, m, off, size); 5753 MPASS(off + size <= PAGE_SIZE); 5754 addr = (caddr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); 5755 memset(addr + off, 0, size); 5756 } 5757 5758 static int 5759 mmu_radix_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa) 5760 { 5761 pml3_entry_t *l3ep; 5762 pt_entry_t pte; 5763 vm_paddr_t pa; 5764 int val; 5765 5766 CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr); 5767 PMAP_LOCK(pmap); 5768 5769 l3ep = pmap_pml3e(pmap, addr); 5770 if (l3ep != NULL && (be64toh(*l3ep) & PG_V)) { 5771 if (be64toh(*l3ep) & RPTE_LEAF) { 5772 pte = be64toh(*l3ep); 5773 /* Compute the physical address of the 4KB page. */ 5774 pa = ((be64toh(*l3ep) & PG_PS_FRAME) | (addr & L3_PAGE_MASK)) & 5775 PG_FRAME; 5776 val = MINCORE_PSIND(1); 5777 } else { 5778 /* Native endian PTE, do not pass to functions */ 5779 pte = be64toh(*pmap_l3e_to_pte(l3ep, addr)); 5780 pa = pte & PG_FRAME; 5781 val = 0; 5782 } 5783 } else { 5784 pte = 0; 5785 pa = 0; 5786 val = 0; 5787 } 5788 if ((pte & PG_V) != 0) { 5789 val |= MINCORE_INCORE; 5790 if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) 5791 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; 5792 if ((pte & PG_A) != 0) 5793 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; 5794 } 5795 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) != 5796 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && 5797 (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) { 5798 *locked_pa = pa; 5799 } 5800 PMAP_UNLOCK(pmap); 5801 return (val); 5802 } 5803 5804 void 5805 mmu_radix_activate(struct thread *td) 5806 { 5807 pmap_t pmap; 5808 uint32_t curpid; 5809 5810 CTR2(KTR_PMAP, "%s(%p)", __func__, td); 5811 critical_enter(); 5812 pmap = vmspace_pmap(td->td_proc->p_vmspace); 5813 curpid = mfspr(SPR_PID); 5814 if (pmap->pm_pid > isa3_base_pid && 5815 curpid != pmap->pm_pid) { 5816 mmu_radix_pid_set(pmap); 5817 } 5818 critical_exit(); 5819 } 5820 5821 /* 5822 * Increase the starting virtual address of the given mapping if a 5823 * different alignment might result in more superpage mappings. 
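 *
 * Worked example, assuming 2MB (L3_PAGE_SIZE) superpages: for a mapping
 * whose object offset is 3MB, superpage_offset is 1MB.  A 2MB-aligned
 * hint address is therefore bumped by 1MB so that (*addr % 2MB) matches
 * (offset % 2MB); after that, every 2MB-aligned piece of the mapping
 * lines up with a 2MB-aligned piece of the object and is eligible for
 * promotion.  No adjustment is made when the mapping is too small to
 * contain even one full, realigned superpage.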
5824 */ 5825 void 5826 mmu_radix_align_superpage(vm_object_t object, vm_ooffset_t offset, 5827 vm_offset_t *addr, vm_size_t size) 5828 { 5829 5830 CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, object, offset, addr, 5831 size); 5832 vm_offset_t superpage_offset; 5833 5834 if (size < L3_PAGE_SIZE) 5835 return; 5836 if (object != NULL && (object->flags & OBJ_COLORED) != 0) 5837 offset += ptoa(object->pg_color); 5838 superpage_offset = offset & L3_PAGE_MASK; 5839 if (size - ((L3_PAGE_SIZE - superpage_offset) & L3_PAGE_MASK) < L3_PAGE_SIZE || 5840 (*addr & L3_PAGE_MASK) == superpage_offset) 5841 return; 5842 if ((*addr & L3_PAGE_MASK) < superpage_offset) 5843 *addr = (*addr & ~L3_PAGE_MASK) + superpage_offset; 5844 else 5845 *addr = ((*addr + L3_PAGE_MASK) & ~L3_PAGE_MASK) + superpage_offset; 5846 } 5847 5848 static void * 5849 mmu_radix_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t attr) 5850 { 5851 vm_offset_t va, tmpva, ppa, offset; 5852 5853 ppa = trunc_page(pa); 5854 offset = pa & PAGE_MASK; 5855 size = roundup2(offset + size, PAGE_SIZE); 5856 if (pa < powerpc_ptob(Maxmem)) 5857 panic("bad pa: %#lx less than Maxmem %#lx\n", 5858 pa, powerpc_ptob(Maxmem)); 5859 va = kva_alloc(size); 5860 if (bootverbose) 5861 printf("%s(%#lx, %lu, %d)\n", __func__, pa, size, attr); 5862 KASSERT(size > 0, ("%s(%#lx, %lu, %d)", __func__, pa, size, attr)); 5863 5864 if (!va) 5865 panic("%s: Couldn't alloc kernel virtual memory", __func__); 5866 5867 for (tmpva = va; size > 0;) { 5868 mmu_radix_kenter_attr(tmpva, ppa, attr); 5869 size -= PAGE_SIZE; 5870 tmpva += PAGE_SIZE; 5871 ppa += PAGE_SIZE; 5872 } 5873 ptesync(); 5874 5875 return ((void *)(va + offset)); 5876 } 5877 5878 static void * 5879 mmu_radix_mapdev(vm_paddr_t pa, vm_size_t size) 5880 { 5881 5882 CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size); 5883 5884 return (mmu_radix_mapdev_attr(pa, size, VM_MEMATTR_DEFAULT)); 5885 } 5886 5887 void 5888 mmu_radix_page_set_memattr(vm_page_t m, vm_memattr_t ma) 5889 { 5890 5891 CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma); 5892 m->md.mdpg_cache_attrs = ma; 5893 5894 /* 5895 * If "m" is a normal page, update its direct mapping. This update 5896 * can be relied upon to perform any cache operations that are 5897 * required for data coherence. 5898 */ 5899 if ((m->flags & PG_FICTITIOUS) == 0 && 5900 mmu_radix_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), 5901 PAGE_SIZE, m->md.mdpg_cache_attrs)) 5902 panic("memory attribute change on the direct map failed"); 5903 } 5904 5905 static void 5906 mmu_radix_unmapdev(vm_offset_t va, vm_size_t size) 5907 { 5908 vm_offset_t offset; 5909 5910 CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, size); 5911 /* If we gave a direct map region in pmap_mapdev, do nothing */ 5912 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) 5913 return; 5914 5915 offset = va & PAGE_MASK; 5916 size = round_page(offset + size); 5917 va = trunc_page(va); 5918 5919 if (pmap_initialized) { 5920 mmu_radix_qremove(va, atop(size)); 5921 kva_free(va, size); 5922 } 5923 } 5924 5925 static __inline void 5926 pmap_pte_attr(pt_entry_t *pte, uint64_t cache_bits, uint64_t mask) 5927 { 5928 uint64_t opte, npte; 5929 5930 /* 5931 * The cache mode bits are all in the low 32-bits of the 5932 * PTE, so we can just spin on updating the low 32-bits. 5933 */ 5934 do { 5935 opte = be64toh(*pte); 5936 npte = opte & ~mask; 5937 npte |= cache_bits; 5938 } while (npte != opte && !atomic_cmpset_long(pte, htobe64(opte), htobe64(npte))); 5939 } 5940 5941 /* 5942 * Tries to demote a 1GB page mapping. 
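 *
 * Roughly: allocate a fresh page-directory page, fill its NPDEPG slots
 * with 2MB leaf l3es that inherit the old 1GB entry's attributes while
 * stepping the physical address by L3_PAGE_SIZE, install that page in
 * place of the 1GB leaf with pde_store(), and invalidate so that the
 * stale translation and page-walk cache entries are dropped.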
5943 */ 5944 static boolean_t 5945 pmap_demote_l2e(pmap_t pmap, pml2_entry_t *l2e, vm_offset_t va) 5946 { 5947 pml2_entry_t oldpdpe; 5948 pml3_entry_t *firstpde, newpde, *pde; 5949 vm_paddr_t pdpgpa; 5950 vm_page_t pdpg; 5951 5952 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 5953 oldpdpe = be64toh(*l2e); 5954 KASSERT((oldpdpe & (RPTE_LEAF | PG_V)) == (RPTE_LEAF | PG_V), 5955 ("pmap_demote_pdpe: oldpdpe is missing PG_PS and/or PG_V")); 5956 pdpg = vm_page_alloc(NULL, va >> L2_PAGE_SIZE_SHIFT, 5957 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED); 5958 if (pdpg == NULL) { 5959 CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx" 5960 " in pmap %p", va, pmap); 5961 return (FALSE); 5962 } 5963 pdpgpa = VM_PAGE_TO_PHYS(pdpg); 5964 firstpde = (pml3_entry_t *)PHYS_TO_DMAP(pdpgpa); 5965 KASSERT((oldpdpe & PG_A) != 0, 5966 ("pmap_demote_pdpe: oldpdpe is missing PG_A")); 5967 KASSERT((oldpdpe & (PG_M | PG_RW)) != PG_RW, 5968 ("pmap_demote_pdpe: oldpdpe is missing PG_M")); 5969 newpde = oldpdpe; 5970 5971 /* 5972 * Initialize the page directory page. 5973 */ 5974 for (pde = firstpde; pde < firstpde + NPDEPG; pde++) { 5975 *pde = htobe64(newpde); 5976 newpde += L3_PAGE_SIZE; 5977 } 5978 5979 /* 5980 * Demote the mapping. 5981 */ 5982 pde_store(l2e, pdpgpa); 5983 5984 /* 5985 * Flush PWC --- XXX revisit 5986 */ 5987 pmap_invalidate_all(pmap); 5988 5989 counter_u64_add(pmap_l2e_demotions, 1); 5990 CTR2(KTR_PMAP, "pmap_demote_pdpe: success for va %#lx" 5991 " in pmap %p", va, pmap); 5992 return (TRUE); 5993 } 5994 5995 vm_paddr_t 5996 mmu_radix_kextract(vm_offset_t va) 5997 { 5998 pml3_entry_t l3e; 5999 vm_paddr_t pa; 6000 6001 CTR2(KTR_PMAP, "%s(%#x)", __func__, va); 6002 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) { 6003 pa = DMAP_TO_PHYS(va); 6004 } else { 6005 /* Big-endian PTE on stack */ 6006 l3e = *pmap_pml3e(kernel_pmap, va); 6007 if (be64toh(l3e) & RPTE_LEAF) { 6008 pa = (be64toh(l3e) & PG_PS_FRAME) | (va & L3_PAGE_MASK); 6009 pa |= (va & L3_PAGE_MASK); 6010 } else { 6011 /* 6012 * Beware of a concurrent promotion that changes the 6013 * PDE at this point! For example, vtopte() must not 6014 * be used to access the PTE because it would use the 6015 * new PDE. It is, however, safe to use the old PDE 6016 * because the page table page is preserved by the 6017 * promotion. 6018 */ 6019 pa = be64toh(*pmap_l3e_to_pte(&l3e, va)); 6020 pa = (pa & PG_FRAME) | (va & PAGE_MASK); 6021 pa |= (va & PAGE_MASK); 6022 } 6023 } 6024 return (pa); 6025 } 6026 6027 static pt_entry_t 6028 mmu_radix_calc_wimg(vm_paddr_t pa, vm_memattr_t ma) 6029 { 6030 6031 if (ma != VM_MEMATTR_DEFAULT) { 6032 return pmap_cache_bits(ma); 6033 } 6034 6035 /* 6036 * Assume the page is cache inhibited and access is guarded unless 6037 * it's in our available memory array. 
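	 * That is, physical addresses inside one of the pregions[] ranges
	 * reported by the platform get the normal cacheable memory attribute
	 * (RPTE_ATTR_MEM); anything else is presumed to be device space and
	 * gets the cache-inhibited, guarded attribute (RPTE_ATTR_GUARDEDIO).
	 * A mmu_radix_kenter_attr(va, pa, VM_MEMATTR_DEFAULT) call on a
	 * device register address therefore still ends up guarded.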
6038 */ 6039 for (int i = 0; i < pregions_sz; i++) { 6040 if ((pa >= pregions[i].mr_start) && 6041 (pa < (pregions[i].mr_start + pregions[i].mr_size))) 6042 return (RPTE_ATTR_MEM); 6043 } 6044 return (RPTE_ATTR_GUARDEDIO); 6045 } 6046 6047 static void 6048 mmu_radix_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma) 6049 { 6050 pt_entry_t *pte, pteval; 6051 uint64_t cache_bits; 6052 6053 pte = kvtopte(va); 6054 MPASS(pte != NULL); 6055 pteval = pa | RPTE_EAA_R | RPTE_EAA_W | RPTE_EAA_P | PG_M | PG_A; 6056 cache_bits = mmu_radix_calc_wimg(pa, ma); 6057 pte_store(pte, pteval | cache_bits); 6058 } 6059 6060 void 6061 mmu_radix_kremove(vm_offset_t va) 6062 { 6063 pt_entry_t *pte; 6064 6065 CTR2(KTR_PMAP, "%s(%#x)", __func__, va); 6066 6067 pte = kvtopte(va); 6068 pte_clear(pte); 6069 } 6070 6071 int 6072 mmu_radix_decode_kernel_ptr(vm_offset_t addr, 6073 int *is_user, vm_offset_t *decoded) 6074 { 6075 6076 CTR2(KTR_PMAP, "%s(%#jx)", __func__, (uintmax_t)addr); 6077 *decoded = addr; 6078 *is_user = (addr < VM_MAXUSER_ADDRESS); 6079 return (0); 6080 } 6081 6082 static boolean_t 6083 mmu_radix_dev_direct_mapped(vm_paddr_t pa, vm_size_t size) 6084 { 6085 6086 CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size); 6087 return (mem_valid(pa, size)); 6088 } 6089 6090 static void 6091 mmu_radix_scan_init() 6092 { 6093 6094 CTR1(KTR_PMAP, "%s()", __func__); 6095 UNIMPLEMENTED(); 6096 } 6097 6098 static void 6099 mmu_radix_dumpsys_map(vm_paddr_t pa, size_t sz, 6100 void **va) 6101 { 6102 CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va); 6103 UNIMPLEMENTED(); 6104 } 6105 6106 vm_offset_t 6107 mmu_radix_quick_enter_page(vm_page_t m) 6108 { 6109 vm_paddr_t paddr; 6110 6111 CTR2(KTR_PMAP, "%s(%p)", __func__, m); 6112 paddr = VM_PAGE_TO_PHYS(m); 6113 return (PHYS_TO_DMAP(paddr)); 6114 } 6115 6116 void 6117 mmu_radix_quick_remove_page(vm_offset_t addr __unused) 6118 { 6119 /* no work to do here */ 6120 CTR2(KTR_PMAP, "%s(%#x)", __func__, addr); 6121 } 6122 6123 static void 6124 pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva) 6125 { 6126 cpu_flush_dcache((void *)sva, eva - sva); 6127 } 6128 6129 int 6130 mmu_radix_change_attr(vm_offset_t va, vm_size_t size, 6131 vm_memattr_t mode) 6132 { 6133 int error; 6134 6135 CTR4(KTR_PMAP, "%s(%#x, %#zx, %d)", __func__, va, size, mode); 6136 PMAP_LOCK(kernel_pmap); 6137 error = pmap_change_attr_locked(va, size, mode, true); 6138 PMAP_UNLOCK(kernel_pmap); 6139 return (error); 6140 } 6141 6142 static int 6143 pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool flush) 6144 { 6145 vm_offset_t base, offset, tmpva; 6146 vm_paddr_t pa_start, pa_end, pa_end1; 6147 pml2_entry_t *l2e; 6148 pml3_entry_t *l3e; 6149 pt_entry_t *pte; 6150 int cache_bits, error; 6151 boolean_t changed; 6152 6153 PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED); 6154 base = trunc_page(va); 6155 offset = va & PAGE_MASK; 6156 size = round_page(offset + size); 6157 6158 /* 6159 * Only supported on kernel virtual addresses, including the direct 6160 * map but excluding the recursive map. 6161 */ 6162 if (base < DMAP_MIN_ADDRESS) 6163 return (EINVAL); 6164 6165 cache_bits = pmap_cache_bits(mode); 6166 changed = FALSE; 6167 6168 /* 6169 * Pages that aren't mapped aren't supported. Also break down 2MB pages 6170 * into 4KB pages if required. 
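	 *
	 * This first pass only validates and demotes: it fails with EINVAL
	 * if any part of the range is unmapped, and it demotes 1GB and 2MB
	 * leaves whenever the requested attribute differs but the range does
	 * not span the whole large page.  The second pass below then rewrites
	 * the attribute bits and repeats the change on the direct-map aliases
	 * of the affected physical ranges.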
	 */
	for (tmpva = base; tmpva < base + size; ) {
		l2e = pmap_pml2e(kernel_pmap, tmpva);
		if (l2e == NULL || *l2e == 0)
			return (EINVAL);
		if (be64toh(*l2e) & RPTE_LEAF) {
			/*
			 * If the current 1GB page already has the required
			 * memory type, then we need not demote this page. Just
			 * increment tmpva to the next 1GB page frame.
			 */
			if ((be64toh(*l2e) & RPTE_ATTR_MASK) == cache_bits) {
				tmpva = trunc_1gpage(tmpva) + L2_PAGE_SIZE;
				continue;
			}

			/*
			 * If the current offset aligns with a 1GB page frame
			 * and there is at least 1GB left within the range, then
			 * we need not break down this page into 2MB pages.
			 */
			if ((tmpva & L2_PAGE_MASK) == 0 &&
			    tmpva + L2_PAGE_MASK < base + size) {
				tmpva += L2_PAGE_SIZE;
				continue;
			}
			if (!pmap_demote_l2e(kernel_pmap, l2e, tmpva))
				return (ENOMEM);
		}
		l3e = pmap_l2e_to_l3e(l2e, tmpva);
		KASSERT(l3e != NULL, ("no l3e entry for %#lx in %p\n",
		    tmpva, l2e));
		if (*l3e == 0)
			return (EINVAL);
		if (be64toh(*l3e) & RPTE_LEAF) {
			/*
			 * If the current 2MB page already has the required
			 * memory type, then we need not demote this page. Just
			 * increment tmpva to the next 2MB page frame.
			 */
			if ((be64toh(*l3e) & RPTE_ATTR_MASK) == cache_bits) {
				tmpva = trunc_2mpage(tmpva) + L3_PAGE_SIZE;
				continue;
			}

			/*
			 * If the current offset aligns with a 2MB page frame
			 * and there is at least 2MB left within the range, then
			 * we need not break down this page into 4KB pages.
			 */
			if ((tmpva & L3_PAGE_MASK) == 0 &&
			    tmpva + L3_PAGE_MASK < base + size) {
				tmpva += L3_PAGE_SIZE;
				continue;
			}
			if (!pmap_demote_l3e(kernel_pmap, l3e, tmpva))
				return (ENOMEM);
		}
		pte = pmap_l3e_to_pte(l3e, tmpva);
		if (*pte == 0)
			return (EINVAL);
		tmpva += PAGE_SIZE;
	}
	error = 0;

	/*
	 * Ok, all the pages exist, so run through them updating their
	 * cache mode if required.
	 */
	pa_start = pa_end = 0;
	for (tmpva = base; tmpva < base + size; ) {
		l2e = pmap_pml2e(kernel_pmap, tmpva);
		if (be64toh(*l2e) & RPTE_LEAF) {
			if ((be64toh(*l2e) & RPTE_ATTR_MASK) != cache_bits) {
				pmap_pte_attr(l2e, cache_bits,
				    RPTE_ATTR_MASK);
				changed = TRUE;
			}
			if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
			    (be64toh(*l2e) & PG_PS_FRAME) < dmaplimit) {
				if (pa_start == pa_end) {
					/* Start physical address run. */
					pa_start = be64toh(*l2e) & PG_PS_FRAME;
					pa_end = pa_start + L2_PAGE_SIZE;
				} else if (pa_end == (be64toh(*l2e) & PG_PS_FRAME))
					pa_end += L2_PAGE_SIZE;
				else {
					/* Run ended, update direct map. */
					error = pmap_change_attr_locked(
					    PHYS_TO_DMAP(pa_start),
					    pa_end - pa_start, mode, flush);
					if (error != 0)
						break;
					/* Start physical address run. */
					pa_start = be64toh(*l2e) & PG_PS_FRAME;
					pa_end = pa_start + L2_PAGE_SIZE;
				}
			}
			tmpva = trunc_1gpage(tmpva) + L2_PAGE_SIZE;
			continue;
		}
		l3e = pmap_l2e_to_l3e(l2e, tmpva);
		if (be64toh(*l3e) & RPTE_LEAF) {
			if ((be64toh(*l3e) & RPTE_ATTR_MASK) != cache_bits) {
				pmap_pte_attr(l3e, cache_bits,
				    RPTE_ATTR_MASK);
				changed = TRUE;
			}
			if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
			    (be64toh(*l3e) & PG_PS_FRAME) < dmaplimit) {
				if (pa_start == pa_end) {
					/* Start physical address run.
*/ 6283 pa_start = be64toh(*l3e) & PG_PS_FRAME; 6284 pa_end = pa_start + L3_PAGE_SIZE; 6285 } else if (pa_end == (be64toh(*l3e) & PG_PS_FRAME)) 6286 pa_end += L3_PAGE_SIZE; 6287 else { 6288 /* Run ended, update direct map. */ 6289 error = pmap_change_attr_locked( 6290 PHYS_TO_DMAP(pa_start), 6291 pa_end - pa_start, mode, flush); 6292 if (error != 0) 6293 break; 6294 /* Start physical address run. */ 6295 pa_start = be64toh(*l3e) & PG_PS_FRAME; 6296 pa_end = pa_start + L3_PAGE_SIZE; 6297 } 6298 } 6299 tmpva = trunc_2mpage(tmpva) + L3_PAGE_SIZE; 6300 } else { 6301 pte = pmap_l3e_to_pte(l3e, tmpva); 6302 if ((be64toh(*pte) & RPTE_ATTR_MASK) != cache_bits) { 6303 pmap_pte_attr(pte, cache_bits, 6304 RPTE_ATTR_MASK); 6305 changed = TRUE; 6306 } 6307 if (tmpva >= VM_MIN_KERNEL_ADDRESS && 6308 (be64toh(*pte) & PG_FRAME) < dmaplimit) { 6309 if (pa_start == pa_end) { 6310 /* Start physical address run. */ 6311 pa_start = be64toh(*pte) & PG_FRAME; 6312 pa_end = pa_start + PAGE_SIZE; 6313 } else if (pa_end == (be64toh(*pte) & PG_FRAME)) 6314 pa_end += PAGE_SIZE; 6315 else { 6316 /* Run ended, update direct map. */ 6317 error = pmap_change_attr_locked( 6318 PHYS_TO_DMAP(pa_start), 6319 pa_end - pa_start, mode, flush); 6320 if (error != 0) 6321 break; 6322 /* Start physical address run. */ 6323 pa_start = be64toh(*pte) & PG_FRAME; 6324 pa_end = pa_start + PAGE_SIZE; 6325 } 6326 } 6327 tmpva += PAGE_SIZE; 6328 } 6329 } 6330 if (error == 0 && pa_start != pa_end && pa_start < dmaplimit) { 6331 pa_end1 = MIN(pa_end, dmaplimit); 6332 if (pa_start != pa_end1) 6333 error = pmap_change_attr_locked(PHYS_TO_DMAP(pa_start), 6334 pa_end1 - pa_start, mode, flush); 6335 } 6336 6337 /* 6338 * Flush CPU caches if required to make sure any data isn't cached that 6339 * shouldn't be, etc. 6340 */ 6341 if (changed) { 6342 pmap_invalidate_all(kernel_pmap); 6343 6344 if (flush) 6345 pmap_invalidate_cache_range(base, tmpva); 6346 } 6347 return (error); 6348 } 6349 6350 /* 6351 * Allocate physical memory for the vm_page array and map it into KVA, 6352 * attempting to back the vm_pages with domain-local memory. 6353 */ 6354 void 6355 mmu_radix_page_array_startup(long pages) 6356 { 6357 #ifdef notyet 6358 pml2_entry_t *l2e; 6359 pml3_entry_t *pde; 6360 pml3_entry_t newl3; 6361 vm_offset_t va; 6362 long pfn; 6363 int domain, i; 6364 #endif 6365 vm_paddr_t pa; 6366 vm_offset_t start, end; 6367 6368 vm_page_array_size = pages; 6369 6370 start = VM_MIN_KERNEL_ADDRESS; 6371 end = start + pages * sizeof(struct vm_page); 6372 6373 pa = vm_phys_early_alloc(0, end - start); 6374 6375 start = mmu_radix_map(&start, pa, end - start, VM_MEMATTR_DEFAULT); 6376 #ifdef notyet 6377 /* TODO: NUMA vm_page_array. Blocked out until then (copied from amd64). 
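	 * Roughly what the disabled loop does: for each 2MB chunk of the
	 * vm_page array's KVA it determines the NUMA domain of the pages that
	 * the chunk will describe, allocates 2MB of domain-local memory with
	 * vm_phys_early_alloc(), registers the backing pages with the
	 * minidump via dump_add_page(), and installs a 2MB leaf l3e
	 * (allocating and zeroing an intermediate page-directory page first
	 * when the l2e is not yet valid).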
*/ 6378 for (va = start; va < end; va += L3_PAGE_SIZE) { 6379 pfn = first_page + (va - start) / sizeof(struct vm_page); 6380 domain = vm_phys_domain(ptoa(pfn)); 6381 l2e = pmap_pml2e(kernel_pmap, va); 6382 if ((be64toh(*l2e) & PG_V) == 0) { 6383 pa = vm_phys_early_alloc(domain, PAGE_SIZE); 6384 dump_add_page(pa); 6385 pagezero(PHYS_TO_DMAP(pa)); 6386 pde_store(l2e, (pml2_entry_t)pa); 6387 } 6388 pde = pmap_l2e_to_l3e(l2e, va); 6389 if ((be64toh(*pde) & PG_V) != 0) 6390 panic("Unexpected pde %p", pde); 6391 pa = vm_phys_early_alloc(domain, L3_PAGE_SIZE); 6392 for (i = 0; i < NPDEPG; i++) 6393 dump_add_page(pa + i * PAGE_SIZE); 6394 newl3 = (pml3_entry_t)(pa | RPTE_EAA_P | RPTE_EAA_R | RPTE_EAA_W); 6395 pte_store(pde, newl3); 6396 } 6397 #endif 6398 vm_page_array = (vm_page_t)start; 6399 } 6400 6401 #ifdef DDB 6402 #include <sys/kdb.h> 6403 #include <ddb/ddb.h> 6404 6405 static void 6406 pmap_pte_walk(pml1_entry_t *l1, vm_offset_t va) 6407 { 6408 pml1_entry_t *l1e; 6409 pml2_entry_t *l2e; 6410 pml3_entry_t *l3e; 6411 pt_entry_t *pte; 6412 6413 l1e = &l1[pmap_pml1e_index(va)]; 6414 db_printf("VA %#016lx l1e %#016lx", va, be64toh(*l1e)); 6415 if ((be64toh(*l1e) & PG_V) == 0) { 6416 db_printf("\n"); 6417 return; 6418 } 6419 l2e = pmap_l1e_to_l2e(l1e, va); 6420 db_printf(" l2e %#016lx", be64toh(*l2e)); 6421 if ((be64toh(*l2e) & PG_V) == 0 || (be64toh(*l2e) & RPTE_LEAF) != 0) { 6422 db_printf("\n"); 6423 return; 6424 } 6425 l3e = pmap_l2e_to_l3e(l2e, va); 6426 db_printf(" l3e %#016lx", be64toh(*l3e)); 6427 if ((be64toh(*l3e) & PG_V) == 0 || (be64toh(*l3e) & RPTE_LEAF) != 0) { 6428 db_printf("\n"); 6429 return; 6430 } 6431 pte = pmap_l3e_to_pte(l3e, va); 6432 db_printf(" pte %#016lx\n", be64toh(*pte)); 6433 } 6434 6435 void 6436 pmap_page_print_mappings(vm_page_t m) 6437 { 6438 pmap_t pmap; 6439 pv_entry_t pv; 6440 6441 db_printf("page %p(%lx)\n", m, m->phys_addr); 6442 /* need to elide locks if running in ddb */ 6443 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 6444 db_printf("pv: %p ", pv); 6445 db_printf("va: %#016lx ", pv->pv_va); 6446 pmap = PV_PMAP(pv); 6447 db_printf("pmap %p ", pmap); 6448 if (pmap != NULL) { 6449 db_printf("asid: %lu\n", pmap->pm_pid); 6450 pmap_pte_walk(pmap->pm_pml1, pv->pv_va); 6451 } 6452 } 6453 } 6454 6455 DB_SHOW_COMMAND(pte, pmap_print_pte) 6456 { 6457 vm_offset_t va; 6458 pmap_t pmap; 6459 6460 if (!have_addr) { 6461 db_printf("show pte addr\n"); 6462 return; 6463 } 6464 va = (vm_offset_t)addr; 6465 6466 if (va >= DMAP_MIN_ADDRESS) 6467 pmap = kernel_pmap; 6468 else if (kdb_thread != NULL) 6469 pmap = vmspace_pmap(kdb_thread->td_proc->p_vmspace); 6470 else 6471 pmap = vmspace_pmap(curthread->td_proc->p_vmspace); 6472 6473 pmap_pte_walk(pmap->pm_pml1, va); 6474 } 6475 6476 #endif 6477
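
/*
 * Example use of the DDB helper above (a sketch; the printed values depend
 * entirely on the mapping being inspected):
 *
 *   db> show pte 0xc000000000000000
 *   VA 0xc000000000000000 l1e 0x... l2e 0x... l3e 0x... pte 0x...
 *
 * pmap_pte_walk() stops at the first invalid or leaf entry, so a shorter
 * line simply means the walk ended early at that level.
 */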