1 /*- 2 * Copyright (c) 2001 The NetBSD Foundation, Inc. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to The NetBSD Foundation 6 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the NetBSD 19 * Foundation, Inc. and its contributors. 20 * 4. Neither the name of The NetBSD Foundation nor the names of its 21 * contributors may be used to endorse or promote products derived 22 * from this software without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 */ 36 /*- 37 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 38 * Copyright (C) 1995, 1996 TooLs GmbH. 39 * All rights reserved. 40 * 41 * Redistribution and use in source and binary forms, with or without 42 * modification, are permitted provided that the following conditions 43 * are met: 44 * 1. Redistributions of source code must retain the above copyright 45 * notice, this list of conditions and the following disclaimer. 46 * 2. Redistributions in binary form must reproduce the above copyright 47 * notice, this list of conditions and the following disclaimer in the 48 * documentation and/or other materials provided with the distribution. 49 * 3. All advertising materials mentioning features or use of this software 50 * must display the following acknowledgement: 51 * This product includes software developed by TooLs GmbH. 52 * 4. The name of TooLs GmbH may not be used to endorse or promote products 53 * derived from this software without specific prior written permission. 54 * 55 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 58 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 60 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 61 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 62 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 63 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 64 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 65 * 66 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $ 67 */ 68 /*- 69 * Copyright (C) 2001 Benno Rice. 70 * All rights reserved. 71 * 72 * Redistribution and use in source and binary forms, with or without 73 * modification, are permitted provided that the following conditions 74 * are met: 75 * 1. Redistributions of source code must retain the above copyright 76 * notice, this list of conditions and the following disclaimer. 77 * 2. Redistributions in binary form must reproduce the above copyright 78 * notice, this list of conditions and the following disclaimer in the 79 * documentation and/or other materials provided with the distribution. 80 * 81 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR 82 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 83 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 84 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 85 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 86 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 87 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 88 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 89 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 90 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 91 */ 92 93 #include <sys/cdefs.h> 94 __FBSDID("$FreeBSD$"); 95 96 /* 97 * Manages physical address maps. 98 * 99 * In addition to hardware address maps, this module is called upon to 100 * provide software-use-only maps which may or may not be stored in the 101 * same form as hardware maps. These pseudo-maps are used to store 102 * intermediate results from copy operations to and from address spaces. 103 * 104 * Since the information managed by this module is also stored by the 105 * logical address mapping module, this module may throw away valid virtual 106 * to physical mappings at almost any time. However, invalidations of 107 * mappings must be done as requested. 108 * 109 * In order to cope with hardware architectures which make virtual to 110 * physical map invalidates expensive, this module may delay invalidate 111 * reduced protection operations until such time as they are actually 112 * necessary. This module is given full information as to which processors 113 * are currently using which maps, and to when physical maps must be made 114 * correct. 115 */ 116 117 #include "opt_kstack_pages.h" 118 119 #include <sys/param.h> 120 #include <sys/kernel.h> 121 #include <sys/ktr.h> 122 #include <sys/lock.h> 123 #include <sys/msgbuf.h> 124 #include <sys/mutex.h> 125 #include <sys/proc.h> 126 #include <sys/sysctl.h> 127 #include <sys/systm.h> 128 #include <sys/vmmeter.h> 129 130 #include <dev/ofw/openfirm.h> 131 132 #include <vm/vm.h> 133 #include <vm/vm_param.h> 134 #include <vm/vm_kern.h> 135 #include <vm/vm_page.h> 136 #include <vm/vm_map.h> 137 #include <vm/vm_object.h> 138 #include <vm/vm_extern.h> 139 #include <vm/vm_pageout.h> 140 #include <vm/vm_pager.h> 141 #include <vm/uma.h> 142 143 #include <machine/cpu.h> 144 #include <machine/platform.h> 145 #include <machine/bat.h> 146 #include <machine/frame.h> 147 #include <machine/md_var.h> 148 #include <machine/psl.h> 149 #include <machine/pte.h> 150 #include <machine/smp.h> 151 #include <machine/sr.h> 152 #include <machine/mmuvar.h> 153 154 #include "mmu_if.h" 155 156 #define MOEA_DEBUG 157 158 #define TODO panic("%s: not implemented", __func__); 159 160 #define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) 161 #define VSID_TO_SR(vsid) ((vsid) & 0xf) 162 #define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) 163 164 #define PVO_PTEGIDX_MASK 0x007 /* which PTEG slot */ 165 #define PVO_PTEGIDX_VALID 0x008 /* slot is valid */ 166 #define PVO_WIRED 0x010 /* PVO entry is wired */ 167 #define PVO_MANAGED 0x020 /* PVO entry is managed */ 168 #define PVO_EXECUTABLE 0x040 /* PVO entry is executable */ 169 #define PVO_BOOTSTRAP 0x080 /* PVO entry allocated during 170 bootstrap */ 171 #define PVO_FAKE 0x100 /* fictitious phys page */ 172 #define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF) 173 #define PVO_ISEXECUTABLE(pvo) ((pvo)->pvo_vaddr & PVO_EXECUTABLE) 174 #define PVO_ISFAKE(pvo) ((pvo)->pvo_vaddr & PVO_FAKE) 175 #define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK) 176 #define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID) 177 #define PVO_PTEGIDX_CLR(pvo) \ 178 ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK))) 179 #define PVO_PTEGIDX_SET(pvo, i) \ 180 ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID)) 181 182 #define MOEA_PVO_CHECK(pvo) 183 184 struct ofw_map { 185 vm_offset_t om_va; 186 vm_size_t om_len; 187 vm_offset_t om_pa; 188 u_int om_mode; 189 }; 190 191 /* 192 * Map of physical memory regions. 193 */ 194 static struct mem_region *regions; 195 static struct mem_region *pregions; 196 static u_int phys_avail_count; 197 static int regions_sz, pregions_sz; 198 static struct ofw_map *translations; 199 200 extern struct pmap ofw_pmap; 201 202 /* 203 * Lock for the pteg and pvo tables. 204 */ 205 struct mtx moea_table_mutex; 206 struct mtx moea_vsid_mutex; 207 208 /* tlbie instruction synchronization */ 209 static struct mtx tlbie_mtx; 210 211 /* 212 * PTEG data. 213 */ 214 static struct pteg *moea_pteg_table; 215 u_int moea_pteg_count; 216 u_int moea_pteg_mask; 217 218 /* 219 * PVO data. 220 */ 221 struct pvo_head *moea_pvo_table; /* pvo entries by pteg index */ 222 struct pvo_head moea_pvo_kunmanaged = 223 LIST_HEAD_INITIALIZER(moea_pvo_kunmanaged); /* list of unmanaged pages */ 224 225 uma_zone_t moea_upvo_zone; /* zone for pvo entries for unmanaged pages */ 226 uma_zone_t moea_mpvo_zone; /* zone for pvo entries for managed pages */ 227 228 #define BPVO_POOL_SIZE 32768 229 static struct pvo_entry *moea_bpvo_pool; 230 static int moea_bpvo_pool_index = 0; 231 232 #define VSID_NBPW (sizeof(u_int32_t) * 8) 233 static u_int moea_vsid_bitmap[NPMAPS / VSID_NBPW]; 234 235 static boolean_t moea_initialized = FALSE; 236 237 /* 238 * Statistics. 239 */ 240 u_int moea_pte_valid = 0; 241 u_int moea_pte_overflow = 0; 242 u_int moea_pte_replacements = 0; 243 u_int moea_pvo_entries = 0; 244 u_int moea_pvo_enter_calls = 0; 245 u_int moea_pvo_remove_calls = 0; 246 u_int moea_pte_spills = 0; 247 SYSCTL_INT(_machdep, OID_AUTO, moea_pte_valid, CTLFLAG_RD, &moea_pte_valid, 248 0, ""); 249 SYSCTL_INT(_machdep, OID_AUTO, moea_pte_overflow, CTLFLAG_RD, 250 &moea_pte_overflow, 0, ""); 251 SYSCTL_INT(_machdep, OID_AUTO, moea_pte_replacements, CTLFLAG_RD, 252 &moea_pte_replacements, 0, ""); 253 SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_entries, CTLFLAG_RD, &moea_pvo_entries, 254 0, ""); 255 SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_enter_calls, CTLFLAG_RD, 256 &moea_pvo_enter_calls, 0, ""); 257 SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_remove_calls, CTLFLAG_RD, 258 &moea_pvo_remove_calls, 0, ""); 259 SYSCTL_INT(_machdep, OID_AUTO, moea_pte_spills, CTLFLAG_RD, 260 &moea_pte_spills, 0, ""); 261 262 /* 263 * Allocate physical memory for use in moea_bootstrap. 264 */ 265 static vm_offset_t moea_bootstrap_alloc(vm_size_t, u_int); 266 267 /* 268 * PTE calls. 269 */ 270 static int moea_pte_insert(u_int, struct pte *); 271 272 /* 273 * PVO calls. 274 */ 275 static int moea_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *, 276 vm_offset_t, vm_offset_t, u_int, int); 277 static void moea_pvo_remove(struct pvo_entry *, int); 278 static struct pvo_entry *moea_pvo_find_va(pmap_t, vm_offset_t, int *); 279 static struct pte *moea_pvo_to_pte(const struct pvo_entry *, int); 280 281 /* 282 * Utility routines. 283 */ 284 static void moea_enter_locked(pmap_t, vm_offset_t, vm_page_t, 285 vm_prot_t, boolean_t); 286 static void moea_syncicache(vm_offset_t, vm_size_t); 287 static boolean_t moea_query_bit(vm_page_t, int); 288 static u_int moea_clear_bit(vm_page_t, int); 289 static void moea_kremove(mmu_t, vm_offset_t); 290 int moea_pte_spill(vm_offset_t); 291 292 /* 293 * Kernel MMU interface 294 */ 295 void moea_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t); 296 void moea_clear_modify(mmu_t, vm_page_t); 297 void moea_clear_reference(mmu_t, vm_page_t); 298 void moea_copy_page(mmu_t, vm_page_t, vm_page_t); 299 void moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t); 300 void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t, 301 vm_prot_t); 302 void moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t); 303 vm_paddr_t moea_extract(mmu_t, pmap_t, vm_offset_t); 304 vm_page_t moea_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t); 305 void moea_init(mmu_t); 306 boolean_t moea_is_modified(mmu_t, vm_page_t); 307 boolean_t moea_is_prefaultable(mmu_t, pmap_t, vm_offset_t); 308 boolean_t moea_is_referenced(mmu_t, vm_page_t); 309 boolean_t moea_ts_referenced(mmu_t, vm_page_t); 310 vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int); 311 boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t); 312 int moea_page_wired_mappings(mmu_t, vm_page_t); 313 void moea_pinit(mmu_t, pmap_t); 314 void moea_pinit0(mmu_t, pmap_t); 315 void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); 316 void moea_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 317 void moea_qremove(mmu_t, vm_offset_t, int); 318 void moea_release(mmu_t, pmap_t); 319 void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 320 void moea_remove_all(mmu_t, vm_page_t); 321 void moea_remove_write(mmu_t, vm_page_t); 322 void moea_zero_page(mmu_t, vm_page_t); 323 void moea_zero_page_area(mmu_t, vm_page_t, int, int); 324 void moea_zero_page_idle(mmu_t, vm_page_t); 325 void moea_activate(mmu_t, struct thread *); 326 void moea_deactivate(mmu_t, struct thread *); 327 void moea_cpu_bootstrap(mmu_t, int); 328 void moea_bootstrap(mmu_t, vm_offset_t, vm_offset_t); 329 void *moea_mapdev(mmu_t, vm_offset_t, vm_size_t); 330 void *moea_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t); 331 void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t); 332 vm_offset_t moea_kextract(mmu_t, vm_offset_t); 333 void moea_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t); 334 void moea_kenter(mmu_t, vm_offset_t, vm_offset_t); 335 void moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma); 336 boolean_t moea_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t); 337 static void moea_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t); 338 339 static mmu_method_t moea_methods[] = { 340 MMUMETHOD(mmu_change_wiring, moea_change_wiring), 341 MMUMETHOD(mmu_clear_modify, moea_clear_modify), 342 MMUMETHOD(mmu_clear_reference, moea_clear_reference), 343 MMUMETHOD(mmu_copy_page, moea_copy_page), 344 MMUMETHOD(mmu_enter, moea_enter), 345 MMUMETHOD(mmu_enter_object, moea_enter_object), 346 MMUMETHOD(mmu_enter_quick, moea_enter_quick), 347 MMUMETHOD(mmu_extract, moea_extract), 348 MMUMETHOD(mmu_extract_and_hold, moea_extract_and_hold), 349 MMUMETHOD(mmu_init, moea_init), 350 MMUMETHOD(mmu_is_modified, moea_is_modified), 351 MMUMETHOD(mmu_is_prefaultable, moea_is_prefaultable), 352 MMUMETHOD(mmu_is_referenced, moea_is_referenced), 353 MMUMETHOD(mmu_ts_referenced, moea_ts_referenced), 354 MMUMETHOD(mmu_map, moea_map), 355 MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick), 356 MMUMETHOD(mmu_page_wired_mappings,moea_page_wired_mappings), 357 MMUMETHOD(mmu_pinit, moea_pinit), 358 MMUMETHOD(mmu_pinit0, moea_pinit0), 359 MMUMETHOD(mmu_protect, moea_protect), 360 MMUMETHOD(mmu_qenter, moea_qenter), 361 MMUMETHOD(mmu_qremove, moea_qremove), 362 MMUMETHOD(mmu_release, moea_release), 363 MMUMETHOD(mmu_remove, moea_remove), 364 MMUMETHOD(mmu_remove_all, moea_remove_all), 365 MMUMETHOD(mmu_remove_write, moea_remove_write), 366 MMUMETHOD(mmu_sync_icache, moea_sync_icache), 367 MMUMETHOD(mmu_zero_page, moea_zero_page), 368 MMUMETHOD(mmu_zero_page_area, moea_zero_page_area), 369 MMUMETHOD(mmu_zero_page_idle, moea_zero_page_idle), 370 MMUMETHOD(mmu_activate, moea_activate), 371 MMUMETHOD(mmu_deactivate, moea_deactivate), 372 MMUMETHOD(mmu_page_set_memattr, moea_page_set_memattr), 373 374 /* Internal interfaces */ 375 MMUMETHOD(mmu_bootstrap, moea_bootstrap), 376 MMUMETHOD(mmu_cpu_bootstrap, moea_cpu_bootstrap), 377 MMUMETHOD(mmu_mapdev_attr, moea_mapdev_attr), 378 MMUMETHOD(mmu_mapdev, moea_mapdev), 379 MMUMETHOD(mmu_unmapdev, moea_unmapdev), 380 MMUMETHOD(mmu_kextract, moea_kextract), 381 MMUMETHOD(mmu_kenter, moea_kenter), 382 MMUMETHOD(mmu_kenter_attr, moea_kenter_attr), 383 MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped), 384 385 { 0, 0 } 386 }; 387 388 MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods, 0); 389 390 static __inline uint32_t 391 moea_calc_wimg(vm_offset_t pa, vm_memattr_t ma) 392 { 393 uint32_t pte_lo; 394 int i; 395 396 if (ma != VM_MEMATTR_DEFAULT) { 397 switch (ma) { 398 case VM_MEMATTR_UNCACHEABLE: 399 return (PTE_I | PTE_G); 400 case VM_MEMATTR_WRITE_COMBINING: 401 case VM_MEMATTR_WRITE_BACK: 402 case VM_MEMATTR_PREFETCHABLE: 403 return (PTE_I); 404 case VM_MEMATTR_WRITE_THROUGH: 405 return (PTE_W | PTE_M); 406 } 407 } 408 409 /* 410 * Assume the page is cache inhibited and access is guarded unless 411 * it's in our available memory array. 412 */ 413 pte_lo = PTE_I | PTE_G; 414 for (i = 0; i < pregions_sz; i++) { 415 if ((pa >= pregions[i].mr_start) && 416 (pa < (pregions[i].mr_start + pregions[i].mr_size))) { 417 pte_lo = PTE_M; 418 break; 419 } 420 } 421 422 return pte_lo; 423 } 424 425 static void 426 tlbie(vm_offset_t va) 427 { 428 429 mtx_lock_spin(&tlbie_mtx); 430 __asm __volatile("ptesync"); 431 __asm __volatile("tlbie %0" :: "r"(va)); 432 __asm __volatile("eieio; tlbsync; ptesync"); 433 mtx_unlock_spin(&tlbie_mtx); 434 } 435 436 static void 437 tlbia(void) 438 { 439 vm_offset_t va; 440 441 for (va = 0; va < 0x00040000; va += 0x00001000) { 442 __asm __volatile("tlbie %0" :: "r"(va)); 443 powerpc_sync(); 444 } 445 __asm __volatile("tlbsync"); 446 powerpc_sync(); 447 } 448 449 static __inline int 450 va_to_sr(u_int *sr, vm_offset_t va) 451 { 452 return (sr[(uintptr_t)va >> ADDR_SR_SHFT]); 453 } 454 455 static __inline u_int 456 va_to_pteg(u_int sr, vm_offset_t addr) 457 { 458 u_int hash; 459 460 hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >> 461 ADDR_PIDX_SHFT); 462 return (hash & moea_pteg_mask); 463 } 464 465 static __inline struct pvo_head * 466 vm_page_to_pvoh(vm_page_t m) 467 { 468 469 return (&m->md.mdpg_pvoh); 470 } 471 472 static __inline void 473 moea_attr_clear(vm_page_t m, int ptebit) 474 { 475 476 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 477 m->md.mdpg_attrs &= ~ptebit; 478 } 479 480 static __inline int 481 moea_attr_fetch(vm_page_t m) 482 { 483 484 return (m->md.mdpg_attrs); 485 } 486 487 static __inline void 488 moea_attr_save(vm_page_t m, int ptebit) 489 { 490 491 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 492 m->md.mdpg_attrs |= ptebit; 493 } 494 495 static __inline int 496 moea_pte_compare(const struct pte *pt, const struct pte *pvo_pt) 497 { 498 if (pt->pte_hi == pvo_pt->pte_hi) 499 return (1); 500 501 return (0); 502 } 503 504 static __inline int 505 moea_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which) 506 { 507 return (pt->pte_hi & ~PTE_VALID) == 508 (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) | 509 ((va >> ADDR_API_SHFT) & PTE_API) | which); 510 } 511 512 static __inline void 513 moea_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo) 514 { 515 516 mtx_assert(&moea_table_mutex, MA_OWNED); 517 518 /* 519 * Construct a PTE. Default to IMB initially. Valid bit only gets 520 * set when the real pte is set in memory. 521 * 522 * Note: Don't set the valid bit for correct operation of tlb update. 523 */ 524 pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) | 525 (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API); 526 pt->pte_lo = pte_lo; 527 } 528 529 static __inline void 530 moea_pte_synch(struct pte *pt, struct pte *pvo_pt) 531 { 532 533 mtx_assert(&moea_table_mutex, MA_OWNED); 534 pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG); 535 } 536 537 static __inline void 538 moea_pte_clear(struct pte *pt, vm_offset_t va, int ptebit) 539 { 540 541 mtx_assert(&moea_table_mutex, MA_OWNED); 542 543 /* 544 * As shown in Section 7.6.3.2.3 545 */ 546 pt->pte_lo &= ~ptebit; 547 tlbie(va); 548 } 549 550 static __inline void 551 moea_pte_set(struct pte *pt, struct pte *pvo_pt) 552 { 553 554 mtx_assert(&moea_table_mutex, MA_OWNED); 555 pvo_pt->pte_hi |= PTE_VALID; 556 557 /* 558 * Update the PTE as defined in section 7.6.3.1. 559 * Note that the REF/CHG bits are from pvo_pt and thus should havce 560 * been saved so this routine can restore them (if desired). 561 */ 562 pt->pte_lo = pvo_pt->pte_lo; 563 powerpc_sync(); 564 pt->pte_hi = pvo_pt->pte_hi; 565 powerpc_sync(); 566 moea_pte_valid++; 567 } 568 569 static __inline void 570 moea_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va) 571 { 572 573 mtx_assert(&moea_table_mutex, MA_OWNED); 574 pvo_pt->pte_hi &= ~PTE_VALID; 575 576 /* 577 * Force the reg & chg bits back into the PTEs. 578 */ 579 powerpc_sync(); 580 581 /* 582 * Invalidate the pte. 583 */ 584 pt->pte_hi &= ~PTE_VALID; 585 586 tlbie(va); 587 588 /* 589 * Save the reg & chg bits. 590 */ 591 moea_pte_synch(pt, pvo_pt); 592 moea_pte_valid--; 593 } 594 595 static __inline void 596 moea_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va) 597 { 598 599 /* 600 * Invalidate the PTE 601 */ 602 moea_pte_unset(pt, pvo_pt, va); 603 moea_pte_set(pt, pvo_pt); 604 } 605 606 /* 607 * Quick sort callout for comparing memory regions. 608 */ 609 static int mr_cmp(const void *a, const void *b); 610 static int om_cmp(const void *a, const void *b); 611 612 static int 613 mr_cmp(const void *a, const void *b) 614 { 615 const struct mem_region *regiona; 616 const struct mem_region *regionb; 617 618 regiona = a; 619 regionb = b; 620 if (regiona->mr_start < regionb->mr_start) 621 return (-1); 622 else if (regiona->mr_start > regionb->mr_start) 623 return (1); 624 else 625 return (0); 626 } 627 628 static int 629 om_cmp(const void *a, const void *b) 630 { 631 const struct ofw_map *mapa; 632 const struct ofw_map *mapb; 633 634 mapa = a; 635 mapb = b; 636 if (mapa->om_pa < mapb->om_pa) 637 return (-1); 638 else if (mapa->om_pa > mapb->om_pa) 639 return (1); 640 else 641 return (0); 642 } 643 644 void 645 moea_cpu_bootstrap(mmu_t mmup, int ap) 646 { 647 u_int sdr; 648 int i; 649 650 if (ap) { 651 powerpc_sync(); 652 __asm __volatile("mtdbatu 0,%0" :: "r"(battable[0].batu)); 653 __asm __volatile("mtdbatl 0,%0" :: "r"(battable[0].batl)); 654 isync(); 655 __asm __volatile("mtibatu 0,%0" :: "r"(battable[0].batu)); 656 __asm __volatile("mtibatl 0,%0" :: "r"(battable[0].batl)); 657 isync(); 658 } 659 660 __asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu)); 661 __asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl)); 662 isync(); 663 664 __asm __volatile("mtibatu 1,%0" :: "r"(0)); 665 __asm __volatile("mtdbatu 2,%0" :: "r"(0)); 666 __asm __volatile("mtibatu 2,%0" :: "r"(0)); 667 __asm __volatile("mtdbatu 3,%0" :: "r"(0)); 668 __asm __volatile("mtibatu 3,%0" :: "r"(0)); 669 isync(); 670 671 for (i = 0; i < 16; i++) 672 mtsrin(i << ADDR_SR_SHFT, EMPTY_SEGMENT); 673 674 __asm __volatile("mtsr %0,%1" :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT)); 675 __asm __volatile("mtsr %0,%1" :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT)); 676 powerpc_sync(); 677 678 sdr = (u_int)moea_pteg_table | (moea_pteg_mask >> 10); 679 __asm __volatile("mtsdr1 %0" :: "r"(sdr)); 680 isync(); 681 682 tlbia(); 683 } 684 685 void 686 moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) 687 { 688 ihandle_t mmui; 689 phandle_t chosen, mmu; 690 int sz; 691 int i, j; 692 int ofw_mappings; 693 vm_size_t size, physsz, hwphyssz; 694 vm_offset_t pa, va, off; 695 void *dpcpu; 696 register_t msr; 697 698 /* 699 * Set up BAT0 to map the lowest 256 MB area 700 */ 701 battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW); 702 battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs); 703 704 /* 705 * Map PCI memory space. 706 */ 707 battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW); 708 battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs); 709 710 battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW); 711 battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs); 712 713 battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW); 714 battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs); 715 716 battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW); 717 battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs); 718 719 /* 720 * Map obio devices. 721 */ 722 battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW); 723 battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs); 724 725 /* 726 * Use an IBAT and a DBAT to map the bottom segment of memory 727 * where we are. Turn off instruction relocation temporarily 728 * to prevent faults while reprogramming the IBAT. 729 */ 730 msr = mfmsr(); 731 mtmsr(msr & ~PSL_IR); 732 __asm (".balign 32; \n" 733 "mtibatu 0,%0; mtibatl 0,%1; isync; \n" 734 "mtdbatu 0,%0; mtdbatl 0,%1; isync" 735 :: "r"(battable[0].batu), "r"(battable[0].batl)); 736 mtmsr(msr); 737 738 /* map pci space */ 739 __asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu)); 740 __asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl)); 741 isync(); 742 743 /* set global direct map flag */ 744 hw_direct_map = 1; 745 746 mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz); 747 CTR0(KTR_PMAP, "moea_bootstrap: physical memory"); 748 749 qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp); 750 for (i = 0; i < pregions_sz; i++) { 751 vm_offset_t pa; 752 vm_offset_t end; 753 754 CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)", 755 pregions[i].mr_start, 756 pregions[i].mr_start + pregions[i].mr_size, 757 pregions[i].mr_size); 758 /* 759 * Install entries into the BAT table to allow all 760 * of physmem to be convered by on-demand BAT entries. 761 * The loop will sometimes set the same battable element 762 * twice, but that's fine since they won't be used for 763 * a while yet. 764 */ 765 pa = pregions[i].mr_start & 0xf0000000; 766 end = pregions[i].mr_start + pregions[i].mr_size; 767 do { 768 u_int n = pa >> ADDR_SR_SHFT; 769 770 battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW); 771 battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs); 772 pa += SEGMENT_LENGTH; 773 } while (pa < end); 774 } 775 776 if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz) 777 panic("moea_bootstrap: phys_avail too small"); 778 qsort(regions, regions_sz, sizeof(*regions), mr_cmp); 779 phys_avail_count = 0; 780 physsz = 0; 781 hwphyssz = 0; 782 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 783 for (i = 0, j = 0; i < regions_sz; i++, j += 2) { 784 CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start, 785 regions[i].mr_start + regions[i].mr_size, 786 regions[i].mr_size); 787 if (hwphyssz != 0 && 788 (physsz + regions[i].mr_size) >= hwphyssz) { 789 if (physsz < hwphyssz) { 790 phys_avail[j] = regions[i].mr_start; 791 phys_avail[j + 1] = regions[i].mr_start + 792 hwphyssz - physsz; 793 physsz = hwphyssz; 794 phys_avail_count++; 795 } 796 break; 797 } 798 phys_avail[j] = regions[i].mr_start; 799 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size; 800 phys_avail_count++; 801 physsz += regions[i].mr_size; 802 } 803 physmem = btoc(physsz); 804 805 /* 806 * Allocate PTEG table. 807 */ 808 #ifdef PTEGCOUNT 809 moea_pteg_count = PTEGCOUNT; 810 #else 811 moea_pteg_count = 0x1000; 812 813 while (moea_pteg_count < physmem) 814 moea_pteg_count <<= 1; 815 816 moea_pteg_count >>= 1; 817 #endif /* PTEGCOUNT */ 818 819 size = moea_pteg_count * sizeof(struct pteg); 820 CTR2(KTR_PMAP, "moea_bootstrap: %d PTEGs, %d bytes", moea_pteg_count, 821 size); 822 moea_pteg_table = (struct pteg *)moea_bootstrap_alloc(size, size); 823 CTR1(KTR_PMAP, "moea_bootstrap: PTEG table at %p", moea_pteg_table); 824 bzero((void *)moea_pteg_table, moea_pteg_count * sizeof(struct pteg)); 825 moea_pteg_mask = moea_pteg_count - 1; 826 827 /* 828 * Allocate pv/overflow lists. 829 */ 830 size = sizeof(struct pvo_head) * moea_pteg_count; 831 moea_pvo_table = (struct pvo_head *)moea_bootstrap_alloc(size, 832 PAGE_SIZE); 833 CTR1(KTR_PMAP, "moea_bootstrap: PVO table at %p", moea_pvo_table); 834 for (i = 0; i < moea_pteg_count; i++) 835 LIST_INIT(&moea_pvo_table[i]); 836 837 /* 838 * Initialize the lock that synchronizes access to the pteg and pvo 839 * tables. 840 */ 841 mtx_init(&moea_table_mutex, "pmap table", NULL, MTX_DEF | 842 MTX_RECURSE); 843 mtx_init(&moea_vsid_mutex, "VSID table", NULL, MTX_DEF); 844 845 mtx_init(&tlbie_mtx, "tlbie", NULL, MTX_SPIN); 846 847 /* 848 * Initialise the unmanaged pvo pool. 849 */ 850 moea_bpvo_pool = (struct pvo_entry *)moea_bootstrap_alloc( 851 BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0); 852 moea_bpvo_pool_index = 0; 853 854 /* 855 * Make sure kernel vsid is allocated as well as VSID 0. 856 */ 857 moea_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW] 858 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); 859 moea_vsid_bitmap[0] |= 1; 860 861 /* 862 * Set up the Open Firmware pmap and add it's mappings. 863 */ 864 moea_pinit(mmup, &ofw_pmap); 865 ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT; 866 ofw_pmap.pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT; 867 if ((chosen = OF_finddevice("/chosen")) == -1) 868 panic("moea_bootstrap: can't find /chosen"); 869 OF_getprop(chosen, "mmu", &mmui, 4); 870 if ((mmu = OF_instance_to_package(mmui)) == -1) 871 panic("moea_bootstrap: can't get mmu package"); 872 if ((sz = OF_getproplen(mmu, "translations")) == -1) 873 panic("moea_bootstrap: can't get ofw translation count"); 874 translations = NULL; 875 for (i = 0; phys_avail[i] != 0; i += 2) { 876 if (phys_avail[i + 1] >= sz) { 877 translations = (struct ofw_map *)phys_avail[i]; 878 break; 879 } 880 } 881 if (translations == NULL) 882 panic("moea_bootstrap: no space to copy translations"); 883 bzero(translations, sz); 884 if (OF_getprop(mmu, "translations", translations, sz) == -1) 885 panic("moea_bootstrap: can't get ofw translations"); 886 CTR0(KTR_PMAP, "moea_bootstrap: translations"); 887 sz /= sizeof(*translations); 888 qsort(translations, sz, sizeof (*translations), om_cmp); 889 for (i = 0, ofw_mappings = 0; i < sz; i++) { 890 CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x", 891 translations[i].om_pa, translations[i].om_va, 892 translations[i].om_len); 893 894 /* 895 * If the mapping is 1:1, let the RAM and device on-demand 896 * BAT tables take care of the translation. 897 */ 898 if (translations[i].om_va == translations[i].om_pa) 899 continue; 900 901 /* Enter the pages */ 902 for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) { 903 struct vm_page m; 904 905 m.phys_addr = translations[i].om_pa + off; 906 m.md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT; 907 m.oflags = VPO_BUSY; 908 PMAP_LOCK(&ofw_pmap); 909 moea_enter_locked(&ofw_pmap, 910 translations[i].om_va + off, &m, 911 VM_PROT_ALL, 1); 912 PMAP_UNLOCK(&ofw_pmap); 913 ofw_mappings++; 914 } 915 } 916 917 /* 918 * Calculate the last available physical address. 919 */ 920 for (i = 0; phys_avail[i + 2] != 0; i += 2) 921 ; 922 Maxmem = powerpc_btop(phys_avail[i + 1]); 923 924 /* 925 * Initialize the kernel pmap (which is statically allocated). 926 */ 927 PMAP_LOCK_INIT(kernel_pmap); 928 for (i = 0; i < 16; i++) { 929 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT; 930 } 931 kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT; 932 kernel_pmap->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT; 933 kernel_pmap->pm_active = ~0; 934 935 moea_cpu_bootstrap(mmup,0); 936 937 pmap_bootstrapped++; 938 939 /* 940 * Set the start and end of kva. 941 */ 942 virtual_avail = VM_MIN_KERNEL_ADDRESS; 943 virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS; 944 945 /* 946 * Allocate a kernel stack with a guard page for thread0 and map it 947 * into the kernel page map. 948 */ 949 pa = moea_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE); 950 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 951 virtual_avail = va + KSTACK_PAGES * PAGE_SIZE; 952 CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va); 953 thread0.td_kstack = va; 954 thread0.td_kstack_pages = KSTACK_PAGES; 955 for (i = 0; i < KSTACK_PAGES; i++) { 956 moea_kenter(mmup, va, pa); 957 pa += PAGE_SIZE; 958 va += PAGE_SIZE; 959 } 960 961 /* 962 * Allocate virtual address space for the message buffer. 963 */ 964 pa = msgbuf_phys = moea_bootstrap_alloc(MSGBUF_SIZE, PAGE_SIZE); 965 msgbufp = (struct msgbuf *)virtual_avail; 966 va = virtual_avail; 967 virtual_avail += round_page(MSGBUF_SIZE); 968 while (va < virtual_avail) { 969 moea_kenter(mmup, va, pa); 970 pa += PAGE_SIZE; 971 va += PAGE_SIZE; 972 } 973 974 /* 975 * Allocate virtual address space for the dynamic percpu area. 976 */ 977 pa = moea_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE); 978 dpcpu = (void *)virtual_avail; 979 va = virtual_avail; 980 virtual_avail += DPCPU_SIZE; 981 while (va < virtual_avail) { 982 moea_kenter(mmup, va, pa); 983 pa += PAGE_SIZE; 984 va += PAGE_SIZE; 985 } 986 dpcpu_init(dpcpu, 0); 987 } 988 989 /* 990 * Activate a user pmap. The pmap must be activated before it's address 991 * space can be accessed in any way. 992 */ 993 void 994 moea_activate(mmu_t mmu, struct thread *td) 995 { 996 pmap_t pm, pmr; 997 998 /* 999 * Load all the data we need up front to encourage the compiler to 1000 * not issue any loads while we have interrupts disabled below. 1001 */ 1002 pm = &td->td_proc->p_vmspace->vm_pmap; 1003 pmr = pm->pmap_phys; 1004 1005 pm->pm_active |= PCPU_GET(cpumask); 1006 PCPU_SET(curpmap, pmr); 1007 } 1008 1009 void 1010 moea_deactivate(mmu_t mmu, struct thread *td) 1011 { 1012 pmap_t pm; 1013 1014 pm = &td->td_proc->p_vmspace->vm_pmap; 1015 pm->pm_active &= ~PCPU_GET(cpumask); 1016 PCPU_SET(curpmap, NULL); 1017 } 1018 1019 void 1020 moea_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired) 1021 { 1022 struct pvo_entry *pvo; 1023 1024 PMAP_LOCK(pm); 1025 pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 1026 1027 if (pvo != NULL) { 1028 if (wired) { 1029 if ((pvo->pvo_vaddr & PVO_WIRED) == 0) 1030 pm->pm_stats.wired_count++; 1031 pvo->pvo_vaddr |= PVO_WIRED; 1032 } else { 1033 if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 1034 pm->pm_stats.wired_count--; 1035 pvo->pvo_vaddr &= ~PVO_WIRED; 1036 } 1037 } 1038 PMAP_UNLOCK(pm); 1039 } 1040 1041 void 1042 moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst) 1043 { 1044 vm_offset_t dst; 1045 vm_offset_t src; 1046 1047 dst = VM_PAGE_TO_PHYS(mdst); 1048 src = VM_PAGE_TO_PHYS(msrc); 1049 1050 kcopy((void *)src, (void *)dst, PAGE_SIZE); 1051 } 1052 1053 /* 1054 * Zero a page of physical memory by temporarily mapping it into the tlb. 1055 */ 1056 void 1057 moea_zero_page(mmu_t mmu, vm_page_t m) 1058 { 1059 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1060 void *va = (void *)pa; 1061 1062 bzero(va, PAGE_SIZE); 1063 } 1064 1065 void 1066 moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 1067 { 1068 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1069 void *va = (void *)(pa + off); 1070 1071 bzero(va, size); 1072 } 1073 1074 void 1075 moea_zero_page_idle(mmu_t mmu, vm_page_t m) 1076 { 1077 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1078 void *va = (void *)pa; 1079 1080 bzero(va, PAGE_SIZE); 1081 } 1082 1083 /* 1084 * Map the given physical page at the specified virtual address in the 1085 * target pmap with the protection requested. If specified the page 1086 * will be wired down. 1087 */ 1088 void 1089 moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 1090 boolean_t wired) 1091 { 1092 1093 vm_page_lock_queues(); 1094 PMAP_LOCK(pmap); 1095 moea_enter_locked(pmap, va, m, prot, wired); 1096 vm_page_unlock_queues(); 1097 PMAP_UNLOCK(pmap); 1098 } 1099 1100 /* 1101 * Map the given physical page at the specified virtual address in the 1102 * target pmap with the protection requested. If specified the page 1103 * will be wired down. 1104 * 1105 * The page queues and pmap must be locked. 1106 */ 1107 static void 1108 moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 1109 boolean_t wired) 1110 { 1111 struct pvo_head *pvo_head; 1112 uma_zone_t zone; 1113 vm_page_t pg; 1114 u_int pte_lo, pvo_flags, was_exec; 1115 int error; 1116 1117 if (!moea_initialized) { 1118 pvo_head = &moea_pvo_kunmanaged; 1119 zone = moea_upvo_zone; 1120 pvo_flags = 0; 1121 pg = NULL; 1122 was_exec = PTE_EXEC; 1123 } else { 1124 pvo_head = vm_page_to_pvoh(m); 1125 pg = m; 1126 zone = moea_mpvo_zone; 1127 pvo_flags = PVO_MANAGED; 1128 was_exec = 0; 1129 } 1130 if (pmap_bootstrapped) 1131 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1132 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1133 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 || 1134 (m->oflags & VPO_BUSY) != 0 || VM_OBJECT_LOCKED(m->object), 1135 ("moea_enter_locked: page %p is not busy", m)); 1136 1137 /* XXX change the pvo head for fake pages */ 1138 if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS) { 1139 pvo_flags &= ~PVO_MANAGED; 1140 pvo_head = &moea_pvo_kunmanaged; 1141 zone = moea_upvo_zone; 1142 } 1143 1144 /* 1145 * If this is a managed page, and it's the first reference to the page, 1146 * clear the execness of the page. Otherwise fetch the execness. 1147 */ 1148 if ((pg != NULL) && ((m->flags & PG_FICTITIOUS) == 0)) { 1149 if (LIST_EMPTY(pvo_head)) { 1150 moea_attr_clear(pg, PTE_EXEC); 1151 } else { 1152 was_exec = moea_attr_fetch(pg) & PTE_EXEC; 1153 } 1154 } 1155 1156 pte_lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m)); 1157 1158 if (prot & VM_PROT_WRITE) { 1159 pte_lo |= PTE_BW; 1160 if (pmap_bootstrapped && 1161 (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) 1162 vm_page_flag_set(m, PG_WRITEABLE); 1163 } else 1164 pte_lo |= PTE_BR; 1165 1166 if (prot & VM_PROT_EXECUTE) 1167 pvo_flags |= PVO_EXECUTABLE; 1168 1169 if (wired) 1170 pvo_flags |= PVO_WIRED; 1171 1172 if ((m->flags & PG_FICTITIOUS) != 0) 1173 pvo_flags |= PVO_FAKE; 1174 1175 error = moea_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m), 1176 pte_lo, pvo_flags); 1177 1178 /* 1179 * Flush the real page from the instruction cache if this page is 1180 * mapped executable and cacheable and was not previously mapped (or 1181 * was not mapped executable). 1182 */ 1183 if (error == 0 && (pvo_flags & PVO_EXECUTABLE) && 1184 (pte_lo & PTE_I) == 0 && was_exec == 0) { 1185 /* 1186 * Flush the real memory from the cache. 1187 */ 1188 moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE); 1189 if (pg != NULL) 1190 moea_attr_save(pg, PTE_EXEC); 1191 } 1192 1193 /* XXX syncicache always until problems are sorted */ 1194 moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE); 1195 } 1196 1197 /* 1198 * Maps a sequence of resident pages belonging to the same object. 1199 * The sequence begins with the given page m_start. This page is 1200 * mapped at the given virtual address start. Each subsequent page is 1201 * mapped at a virtual address that is offset from start by the same 1202 * amount as the page is offset from m_start within the object. The 1203 * last page in the sequence is the page with the largest offset from 1204 * m_start that can be mapped at a virtual address less than the given 1205 * virtual address end. Not every virtual page between start and end 1206 * is mapped; only those for which a resident page exists with the 1207 * corresponding offset from m_start are mapped. 1208 */ 1209 void 1210 moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end, 1211 vm_page_t m_start, vm_prot_t prot) 1212 { 1213 vm_page_t m; 1214 vm_pindex_t diff, psize; 1215 1216 psize = atop(end - start); 1217 m = m_start; 1218 vm_page_lock_queues(); 1219 PMAP_LOCK(pm); 1220 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1221 moea_enter_locked(pm, start + ptoa(diff), m, prot & 1222 (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1223 m = TAILQ_NEXT(m, listq); 1224 } 1225 vm_page_unlock_queues(); 1226 PMAP_UNLOCK(pm); 1227 } 1228 1229 void 1230 moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m, 1231 vm_prot_t prot) 1232 { 1233 1234 vm_page_lock_queues(); 1235 PMAP_LOCK(pm); 1236 moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), 1237 FALSE); 1238 vm_page_unlock_queues(); 1239 PMAP_UNLOCK(pm); 1240 } 1241 1242 vm_paddr_t 1243 moea_extract(mmu_t mmu, pmap_t pm, vm_offset_t va) 1244 { 1245 struct pvo_entry *pvo; 1246 vm_paddr_t pa; 1247 1248 PMAP_LOCK(pm); 1249 pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 1250 if (pvo == NULL) 1251 pa = 0; 1252 else 1253 pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF); 1254 PMAP_UNLOCK(pm); 1255 return (pa); 1256 } 1257 1258 /* 1259 * Atomically extract and hold the physical page with the given 1260 * pmap and virtual address pair if that mapping permits the given 1261 * protection. 1262 */ 1263 vm_page_t 1264 moea_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1265 { 1266 struct pvo_entry *pvo; 1267 vm_page_t m; 1268 vm_paddr_t pa; 1269 1270 m = NULL; 1271 pa = 0; 1272 PMAP_LOCK(pmap); 1273 retry: 1274 pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL); 1275 if (pvo != NULL && (pvo->pvo_pte.pte.pte_hi & PTE_VALID) && 1276 ((pvo->pvo_pte.pte.pte_lo & PTE_PP) == PTE_RW || 1277 (prot & VM_PROT_WRITE) == 0)) { 1278 if (vm_page_pa_tryrelock(pmap, pvo->pvo_pte.pte.pte_lo & PTE_RPGN, &pa)) 1279 goto retry; 1280 m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN); 1281 vm_page_hold(m); 1282 } 1283 PA_UNLOCK_COND(pa); 1284 PMAP_UNLOCK(pmap); 1285 return (m); 1286 } 1287 1288 void 1289 moea_init(mmu_t mmu) 1290 { 1291 1292 moea_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), 1293 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1294 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1295 moea_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry), 1296 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1297 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1298 moea_initialized = TRUE; 1299 } 1300 1301 boolean_t 1302 moea_is_referenced(mmu_t mmu, vm_page_t m) 1303 { 1304 1305 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1306 ("moea_is_referenced: page %p is not managed", m)); 1307 return (moea_query_bit(m, PTE_REF)); 1308 } 1309 1310 boolean_t 1311 moea_is_modified(mmu_t mmu, vm_page_t m) 1312 { 1313 1314 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1315 ("moea_is_modified: page %p is not managed", m)); 1316 1317 /* 1318 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be 1319 * concurrently set while the object is locked. Thus, if PG_WRITEABLE 1320 * is clear, no PTEs can have PTE_CHG set. 1321 */ 1322 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1323 if ((m->oflags & VPO_BUSY) == 0 && 1324 (m->flags & PG_WRITEABLE) == 0) 1325 return (FALSE); 1326 return (moea_query_bit(m, PTE_CHG)); 1327 } 1328 1329 boolean_t 1330 moea_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va) 1331 { 1332 struct pvo_entry *pvo; 1333 boolean_t rv; 1334 1335 PMAP_LOCK(pmap); 1336 pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL); 1337 rv = pvo == NULL || (pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0; 1338 PMAP_UNLOCK(pmap); 1339 return (rv); 1340 } 1341 1342 void 1343 moea_clear_reference(mmu_t mmu, vm_page_t m) 1344 { 1345 1346 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1347 ("moea_clear_reference: page %p is not managed", m)); 1348 moea_clear_bit(m, PTE_REF); 1349 } 1350 1351 void 1352 moea_clear_modify(mmu_t mmu, vm_page_t m) 1353 { 1354 1355 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1356 ("moea_clear_modify: page %p is not managed", m)); 1357 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1358 KASSERT((m->oflags & VPO_BUSY) == 0, 1359 ("moea_clear_modify: page %p is busy", m)); 1360 1361 /* 1362 * If the page is not PG_WRITEABLE, then no PTEs can have PTE_CHG 1363 * set. If the object containing the page is locked and the page is 1364 * not VPO_BUSY, then PG_WRITEABLE cannot be concurrently set. 1365 */ 1366 if ((m->flags & PG_WRITEABLE) == 0) 1367 return; 1368 moea_clear_bit(m, PTE_CHG); 1369 } 1370 1371 /* 1372 * Clear the write and modified bits in each of the given page's mappings. 1373 */ 1374 void 1375 moea_remove_write(mmu_t mmu, vm_page_t m) 1376 { 1377 struct pvo_entry *pvo; 1378 struct pte *pt; 1379 pmap_t pmap; 1380 u_int lo; 1381 1382 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1383 ("moea_remove_write: page %p is not managed", m)); 1384 1385 /* 1386 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by 1387 * another thread while the object is locked. Thus, if PG_WRITEABLE 1388 * is clear, no page table entries need updating. 1389 */ 1390 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1391 if ((m->oflags & VPO_BUSY) == 0 && 1392 (m->flags & PG_WRITEABLE) == 0) 1393 return; 1394 vm_page_lock_queues(); 1395 lo = moea_attr_fetch(m); 1396 powerpc_sync(); 1397 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1398 pmap = pvo->pvo_pmap; 1399 PMAP_LOCK(pmap); 1400 if ((pvo->pvo_pte.pte.pte_lo & PTE_PP) != PTE_BR) { 1401 pt = moea_pvo_to_pte(pvo, -1); 1402 pvo->pvo_pte.pte.pte_lo &= ~PTE_PP; 1403 pvo->pvo_pte.pte.pte_lo |= PTE_BR; 1404 if (pt != NULL) { 1405 moea_pte_synch(pt, &pvo->pvo_pte.pte); 1406 lo |= pvo->pvo_pte.pte.pte_lo; 1407 pvo->pvo_pte.pte.pte_lo &= ~PTE_CHG; 1408 moea_pte_change(pt, &pvo->pvo_pte.pte, 1409 pvo->pvo_vaddr); 1410 mtx_unlock(&moea_table_mutex); 1411 } 1412 } 1413 PMAP_UNLOCK(pmap); 1414 } 1415 if ((lo & PTE_CHG) != 0) { 1416 moea_attr_clear(m, PTE_CHG); 1417 vm_page_dirty(m); 1418 } 1419 vm_page_flag_clear(m, PG_WRITEABLE); 1420 vm_page_unlock_queues(); 1421 } 1422 1423 /* 1424 * moea_ts_referenced: 1425 * 1426 * Return a count of reference bits for a page, clearing those bits. 1427 * It is not necessary for every reference bit to be cleared, but it 1428 * is necessary that 0 only be returned when there are truly no 1429 * reference bits set. 1430 * 1431 * XXX: The exact number of bits to check and clear is a matter that 1432 * should be tested and standardized at some point in the future for 1433 * optimal aging of shared pages. 1434 */ 1435 boolean_t 1436 moea_ts_referenced(mmu_t mmu, vm_page_t m) 1437 { 1438 1439 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1440 ("moea_ts_referenced: page %p is not managed", m)); 1441 return (moea_clear_bit(m, PTE_REF)); 1442 } 1443 1444 /* 1445 * Modify the WIMG settings of all mappings for a page. 1446 */ 1447 void 1448 moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma) 1449 { 1450 struct pvo_entry *pvo; 1451 struct pvo_head *pvo_head; 1452 struct pte *pt; 1453 pmap_t pmap; 1454 u_int lo; 1455 1456 if (m->flags & PG_FICTITIOUS) { 1457 m->md.mdpg_cache_attrs = ma; 1458 return; 1459 } 1460 1461 vm_page_lock_queues(); 1462 pvo_head = vm_page_to_pvoh(m); 1463 lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), ma); 1464 1465 LIST_FOREACH(pvo, pvo_head, pvo_vlink) { 1466 pmap = pvo->pvo_pmap; 1467 PMAP_LOCK(pmap); 1468 pt = moea_pvo_to_pte(pvo, -1); 1469 pvo->pvo_pte.pte.pte_lo &= ~PTE_WIMG; 1470 pvo->pvo_pte.pte.pte_lo |= lo; 1471 if (pt != NULL) { 1472 moea_pte_change(pt, &pvo->pvo_pte.pte, 1473 pvo->pvo_vaddr); 1474 if (pvo->pvo_pmap == kernel_pmap) 1475 isync(); 1476 } 1477 mtx_unlock(&moea_table_mutex); 1478 PMAP_UNLOCK(pmap); 1479 } 1480 m->md.mdpg_cache_attrs = ma; 1481 vm_page_unlock_queues(); 1482 } 1483 1484 /* 1485 * Map a wired page into kernel virtual address space. 1486 */ 1487 void 1488 moea_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa) 1489 { 1490 1491 moea_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT); 1492 } 1493 1494 void 1495 moea_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma) 1496 { 1497 u_int pte_lo; 1498 int error; 1499 1500 #if 0 1501 if (va < VM_MIN_KERNEL_ADDRESS) 1502 panic("moea_kenter: attempt to enter non-kernel address %#x", 1503 va); 1504 #endif 1505 1506 pte_lo = moea_calc_wimg(pa, ma); 1507 1508 PMAP_LOCK(kernel_pmap); 1509 error = moea_pvo_enter(kernel_pmap, moea_upvo_zone, 1510 &moea_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED); 1511 1512 if (error != 0 && error != ENOENT) 1513 panic("moea_kenter: failed to enter va %#x pa %#x: %d", va, 1514 pa, error); 1515 1516 /* 1517 * Flush the real memory from the instruction cache. 1518 */ 1519 if ((pte_lo & (PTE_I | PTE_G)) == 0) { 1520 moea_syncicache(pa, PAGE_SIZE); 1521 } 1522 PMAP_UNLOCK(kernel_pmap); 1523 } 1524 1525 /* 1526 * Extract the physical page address associated with the given kernel virtual 1527 * address. 1528 */ 1529 vm_offset_t 1530 moea_kextract(mmu_t mmu, vm_offset_t va) 1531 { 1532 struct pvo_entry *pvo; 1533 vm_paddr_t pa; 1534 1535 /* 1536 * Allow direct mappings on 32-bit OEA 1537 */ 1538 if (va < VM_MIN_KERNEL_ADDRESS) { 1539 return (va); 1540 } 1541 1542 PMAP_LOCK(kernel_pmap); 1543 pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL); 1544 KASSERT(pvo != NULL, ("moea_kextract: no addr found")); 1545 pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF); 1546 PMAP_UNLOCK(kernel_pmap); 1547 return (pa); 1548 } 1549 1550 /* 1551 * Remove a wired page from kernel virtual address space. 1552 */ 1553 void 1554 moea_kremove(mmu_t mmu, vm_offset_t va) 1555 { 1556 1557 moea_remove(mmu, kernel_pmap, va, va + PAGE_SIZE); 1558 } 1559 1560 /* 1561 * Map a range of physical addresses into kernel virtual address space. 1562 * 1563 * The value passed in *virt is a suggested virtual address for the mapping. 1564 * Architectures which can support a direct-mapped physical to virtual region 1565 * can return the appropriate address within that region, leaving '*virt' 1566 * unchanged. We cannot and therefore do not; *virt is updated with the 1567 * first usable address after the mapped region. 1568 */ 1569 vm_offset_t 1570 moea_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start, 1571 vm_offset_t pa_end, int prot) 1572 { 1573 vm_offset_t sva, va; 1574 1575 sva = *virt; 1576 va = sva; 1577 for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) 1578 moea_kenter(mmu, va, pa_start); 1579 *virt = va; 1580 return (sva); 1581 } 1582 1583 /* 1584 * Returns true if the pmap's pv is one of the first 1585 * 16 pvs linked to from this page. This count may 1586 * be changed upwards or downwards in the future; it 1587 * is only necessary that true be returned for a small 1588 * subset of pmaps for proper page aging. 1589 */ 1590 boolean_t 1591 moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 1592 { 1593 int loops; 1594 struct pvo_entry *pvo; 1595 boolean_t rv; 1596 1597 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1598 ("moea_page_exists_quick: page %p is not managed", m)); 1599 loops = 0; 1600 rv = FALSE; 1601 vm_page_lock_queues(); 1602 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1603 if (pvo->pvo_pmap == pmap) { 1604 rv = TRUE; 1605 break; 1606 } 1607 if (++loops >= 16) 1608 break; 1609 } 1610 vm_page_unlock_queues(); 1611 return (rv); 1612 } 1613 1614 /* 1615 * Return the number of managed mappings to the given physical page 1616 * that are wired. 1617 */ 1618 int 1619 moea_page_wired_mappings(mmu_t mmu, vm_page_t m) 1620 { 1621 struct pvo_entry *pvo; 1622 int count; 1623 1624 count = 0; 1625 if ((m->flags & PG_FICTITIOUS) != 0) 1626 return (count); 1627 vm_page_lock_queues(); 1628 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) 1629 if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 1630 count++; 1631 vm_page_unlock_queues(); 1632 return (count); 1633 } 1634 1635 static u_int moea_vsidcontext; 1636 1637 void 1638 moea_pinit(mmu_t mmu, pmap_t pmap) 1639 { 1640 int i, mask; 1641 u_int entropy; 1642 1643 KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("moea_pinit: virt pmap")); 1644 PMAP_LOCK_INIT(pmap); 1645 1646 entropy = 0; 1647 __asm __volatile("mftb %0" : "=r"(entropy)); 1648 1649 if ((pmap->pmap_phys = (pmap_t)moea_kextract(mmu, (vm_offset_t)pmap)) 1650 == NULL) { 1651 pmap->pmap_phys = pmap; 1652 } 1653 1654 1655 mtx_lock(&moea_vsid_mutex); 1656 /* 1657 * Allocate some segment registers for this pmap. 1658 */ 1659 for (i = 0; i < NPMAPS; i += VSID_NBPW) { 1660 u_int hash, n; 1661 1662 /* 1663 * Create a new value by mutiplying by a prime and adding in 1664 * entropy from the timebase register. This is to make the 1665 * VSID more random so that the PT hash function collides 1666 * less often. (Note that the prime casues gcc to do shifts 1667 * instead of a multiply.) 1668 */ 1669 moea_vsidcontext = (moea_vsidcontext * 0x1105) + entropy; 1670 hash = moea_vsidcontext & (NPMAPS - 1); 1671 if (hash == 0) /* 0 is special, avoid it */ 1672 continue; 1673 n = hash >> 5; 1674 mask = 1 << (hash & (VSID_NBPW - 1)); 1675 hash = (moea_vsidcontext & 0xfffff); 1676 if (moea_vsid_bitmap[n] & mask) { /* collision? */ 1677 /* anything free in this bucket? */ 1678 if (moea_vsid_bitmap[n] == 0xffffffff) { 1679 entropy = (moea_vsidcontext >> 20); 1680 continue; 1681 } 1682 i = ffs(~moea_vsid_bitmap[n]) - 1; 1683 mask = 1 << i; 1684 hash &= 0xfffff & ~(VSID_NBPW - 1); 1685 hash |= i; 1686 } 1687 moea_vsid_bitmap[n] |= mask; 1688 for (i = 0; i < 16; i++) 1689 pmap->pm_sr[i] = VSID_MAKE(i, hash); 1690 mtx_unlock(&moea_vsid_mutex); 1691 return; 1692 } 1693 1694 mtx_unlock(&moea_vsid_mutex); 1695 panic("moea_pinit: out of segments"); 1696 } 1697 1698 /* 1699 * Initialize the pmap associated with process 0. 1700 */ 1701 void 1702 moea_pinit0(mmu_t mmu, pmap_t pm) 1703 { 1704 1705 moea_pinit(mmu, pm); 1706 bzero(&pm->pm_stats, sizeof(pm->pm_stats)); 1707 } 1708 1709 /* 1710 * Set the physical protection on the specified range of this map as requested. 1711 */ 1712 void 1713 moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva, 1714 vm_prot_t prot) 1715 { 1716 struct pvo_entry *pvo; 1717 struct pte *pt; 1718 int pteidx; 1719 1720 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, 1721 ("moea_protect: non current pmap")); 1722 1723 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1724 moea_remove(mmu, pm, sva, eva); 1725 return; 1726 } 1727 1728 vm_page_lock_queues(); 1729 PMAP_LOCK(pm); 1730 for (; sva < eva; sva += PAGE_SIZE) { 1731 pvo = moea_pvo_find_va(pm, sva, &pteidx); 1732 if (pvo == NULL) 1733 continue; 1734 1735 if ((prot & VM_PROT_EXECUTE) == 0) 1736 pvo->pvo_vaddr &= ~PVO_EXECUTABLE; 1737 1738 /* 1739 * Grab the PTE pointer before we diddle with the cached PTE 1740 * copy. 1741 */ 1742 pt = moea_pvo_to_pte(pvo, pteidx); 1743 /* 1744 * Change the protection of the page. 1745 */ 1746 pvo->pvo_pte.pte.pte_lo &= ~PTE_PP; 1747 pvo->pvo_pte.pte.pte_lo |= PTE_BR; 1748 1749 /* 1750 * If the PVO is in the page table, update that pte as well. 1751 */ 1752 if (pt != NULL) { 1753 moea_pte_change(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr); 1754 mtx_unlock(&moea_table_mutex); 1755 } 1756 } 1757 vm_page_unlock_queues(); 1758 PMAP_UNLOCK(pm); 1759 } 1760 1761 /* 1762 * Map a list of wired pages into kernel virtual address space. This is 1763 * intended for temporary mappings which do not need page modification or 1764 * references recorded. Existing mappings in the region are overwritten. 1765 */ 1766 void 1767 moea_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count) 1768 { 1769 vm_offset_t va; 1770 1771 va = sva; 1772 while (count-- > 0) { 1773 moea_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 1774 va += PAGE_SIZE; 1775 m++; 1776 } 1777 } 1778 1779 /* 1780 * Remove page mappings from kernel virtual address space. Intended for 1781 * temporary mappings entered by moea_qenter. 1782 */ 1783 void 1784 moea_qremove(mmu_t mmu, vm_offset_t sva, int count) 1785 { 1786 vm_offset_t va; 1787 1788 va = sva; 1789 while (count-- > 0) { 1790 moea_kremove(mmu, va); 1791 va += PAGE_SIZE; 1792 } 1793 } 1794 1795 void 1796 moea_release(mmu_t mmu, pmap_t pmap) 1797 { 1798 int idx, mask; 1799 1800 /* 1801 * Free segment register's VSID 1802 */ 1803 if (pmap->pm_sr[0] == 0) 1804 panic("moea_release"); 1805 1806 mtx_lock(&moea_vsid_mutex); 1807 idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1); 1808 mask = 1 << (idx % VSID_NBPW); 1809 idx /= VSID_NBPW; 1810 moea_vsid_bitmap[idx] &= ~mask; 1811 mtx_unlock(&moea_vsid_mutex); 1812 PMAP_LOCK_DESTROY(pmap); 1813 } 1814 1815 /* 1816 * Remove the given range of addresses from the specified map. 1817 */ 1818 void 1819 moea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva) 1820 { 1821 struct pvo_entry *pvo; 1822 int pteidx; 1823 1824 vm_page_lock_queues(); 1825 PMAP_LOCK(pm); 1826 for (; sva < eva; sva += PAGE_SIZE) { 1827 pvo = moea_pvo_find_va(pm, sva, &pteidx); 1828 if (pvo != NULL) { 1829 moea_pvo_remove(pvo, pteidx); 1830 } 1831 } 1832 PMAP_UNLOCK(pm); 1833 vm_page_unlock_queues(); 1834 } 1835 1836 /* 1837 * Remove physical page from all pmaps in which it resides. moea_pvo_remove() 1838 * will reflect changes in pte's back to the vm_page. 1839 */ 1840 void 1841 moea_remove_all(mmu_t mmu, vm_page_t m) 1842 { 1843 struct pvo_head *pvo_head; 1844 struct pvo_entry *pvo, *next_pvo; 1845 pmap_t pmap; 1846 1847 vm_page_lock_queues(); 1848 pvo_head = vm_page_to_pvoh(m); 1849 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 1850 next_pvo = LIST_NEXT(pvo, pvo_vlink); 1851 1852 MOEA_PVO_CHECK(pvo); /* sanity check */ 1853 pmap = pvo->pvo_pmap; 1854 PMAP_LOCK(pmap); 1855 moea_pvo_remove(pvo, -1); 1856 PMAP_UNLOCK(pmap); 1857 } 1858 if ((m->flags & PG_WRITEABLE) && moea_is_modified(mmu, m)) { 1859 moea_attr_clear(m, PTE_CHG); 1860 vm_page_dirty(m); 1861 } 1862 vm_page_flag_clear(m, PG_WRITEABLE); 1863 vm_page_unlock_queues(); 1864 } 1865 1866 /* 1867 * Allocate a physical page of memory directly from the phys_avail map. 1868 * Can only be called from moea_bootstrap before avail start and end are 1869 * calculated. 1870 */ 1871 static vm_offset_t 1872 moea_bootstrap_alloc(vm_size_t size, u_int align) 1873 { 1874 vm_offset_t s, e; 1875 int i, j; 1876 1877 size = round_page(size); 1878 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 1879 if (align != 0) 1880 s = (phys_avail[i] + align - 1) & ~(align - 1); 1881 else 1882 s = phys_avail[i]; 1883 e = s + size; 1884 1885 if (s < phys_avail[i] || e > phys_avail[i + 1]) 1886 continue; 1887 1888 if (s == phys_avail[i]) { 1889 phys_avail[i] += size; 1890 } else if (e == phys_avail[i + 1]) { 1891 phys_avail[i + 1] -= size; 1892 } else { 1893 for (j = phys_avail_count * 2; j > i; j -= 2) { 1894 phys_avail[j] = phys_avail[j - 2]; 1895 phys_avail[j + 1] = phys_avail[j - 1]; 1896 } 1897 1898 phys_avail[i + 3] = phys_avail[i + 1]; 1899 phys_avail[i + 1] = s; 1900 phys_avail[i + 2] = e; 1901 phys_avail_count++; 1902 } 1903 1904 return (s); 1905 } 1906 panic("moea_bootstrap_alloc: could not allocate memory"); 1907 } 1908 1909 static void 1910 moea_syncicache(vm_offset_t pa, vm_size_t len) 1911 { 1912 __syncicache((void *)pa, len); 1913 } 1914 1915 static int 1916 moea_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head, 1917 vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags) 1918 { 1919 struct pvo_entry *pvo; 1920 u_int sr; 1921 int first; 1922 u_int ptegidx; 1923 int i; 1924 int bootstrap; 1925 1926 moea_pvo_enter_calls++; 1927 first = 0; 1928 bootstrap = 0; 1929 1930 /* 1931 * Compute the PTE Group index. 1932 */ 1933 va &= ~ADDR_POFF; 1934 sr = va_to_sr(pm->pm_sr, va); 1935 ptegidx = va_to_pteg(sr, va); 1936 1937 /* 1938 * Remove any existing mapping for this page. Reuse the pvo entry if 1939 * there is a mapping. 1940 */ 1941 mtx_lock(&moea_table_mutex); 1942 LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) { 1943 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 1944 if ((pvo->pvo_pte.pte.pte_lo & PTE_RPGN) == pa && 1945 (pvo->pvo_pte.pte.pte_lo & PTE_PP) == 1946 (pte_lo & PTE_PP)) { 1947 mtx_unlock(&moea_table_mutex); 1948 return (0); 1949 } 1950 moea_pvo_remove(pvo, -1); 1951 break; 1952 } 1953 } 1954 1955 /* 1956 * If we aren't overwriting a mapping, try to allocate. 1957 */ 1958 if (moea_initialized) { 1959 pvo = uma_zalloc(zone, M_NOWAIT); 1960 } else { 1961 if (moea_bpvo_pool_index >= BPVO_POOL_SIZE) { 1962 panic("moea_enter: bpvo pool exhausted, %d, %d, %d", 1963 moea_bpvo_pool_index, BPVO_POOL_SIZE, 1964 BPVO_POOL_SIZE * sizeof(struct pvo_entry)); 1965 } 1966 pvo = &moea_bpvo_pool[moea_bpvo_pool_index]; 1967 moea_bpvo_pool_index++; 1968 bootstrap = 1; 1969 } 1970 1971 if (pvo == NULL) { 1972 mtx_unlock(&moea_table_mutex); 1973 return (ENOMEM); 1974 } 1975 1976 moea_pvo_entries++; 1977 pvo->pvo_vaddr = va; 1978 pvo->pvo_pmap = pm; 1979 LIST_INSERT_HEAD(&moea_pvo_table[ptegidx], pvo, pvo_olink); 1980 pvo->pvo_vaddr &= ~ADDR_POFF; 1981 if (flags & VM_PROT_EXECUTE) 1982 pvo->pvo_vaddr |= PVO_EXECUTABLE; 1983 if (flags & PVO_WIRED) 1984 pvo->pvo_vaddr |= PVO_WIRED; 1985 if (pvo_head != &moea_pvo_kunmanaged) 1986 pvo->pvo_vaddr |= PVO_MANAGED; 1987 if (bootstrap) 1988 pvo->pvo_vaddr |= PVO_BOOTSTRAP; 1989 if (flags & PVO_FAKE) 1990 pvo->pvo_vaddr |= PVO_FAKE; 1991 1992 moea_pte_create(&pvo->pvo_pte.pte, sr, va, pa | pte_lo); 1993 1994 /* 1995 * Remember if the list was empty and therefore will be the first 1996 * item. 1997 */ 1998 if (LIST_FIRST(pvo_head) == NULL) 1999 first = 1; 2000 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); 2001 2002 if (pvo->pvo_pte.pte.pte_lo & PVO_WIRED) 2003 pm->pm_stats.wired_count++; 2004 pm->pm_stats.resident_count++; 2005 2006 /* 2007 * We hope this succeeds but it isn't required. 2008 */ 2009 i = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte); 2010 if (i >= 0) { 2011 PVO_PTEGIDX_SET(pvo, i); 2012 } else { 2013 panic("moea_pvo_enter: overflow"); 2014 moea_pte_overflow++; 2015 } 2016 mtx_unlock(&moea_table_mutex); 2017 2018 return (first ? ENOENT : 0); 2019 } 2020 2021 static void 2022 moea_pvo_remove(struct pvo_entry *pvo, int pteidx) 2023 { 2024 struct pte *pt; 2025 2026 /* 2027 * If there is an active pte entry, we need to deactivate it (and 2028 * save the ref & cfg bits). 2029 */ 2030 pt = moea_pvo_to_pte(pvo, pteidx); 2031 if (pt != NULL) { 2032 moea_pte_unset(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr); 2033 mtx_unlock(&moea_table_mutex); 2034 PVO_PTEGIDX_CLR(pvo); 2035 } else { 2036 moea_pte_overflow--; 2037 } 2038 2039 /* 2040 * Update our statistics. 2041 */ 2042 pvo->pvo_pmap->pm_stats.resident_count--; 2043 if (pvo->pvo_pte.pte.pte_lo & PVO_WIRED) 2044 pvo->pvo_pmap->pm_stats.wired_count--; 2045 2046 /* 2047 * Save the REF/CHG bits into their cache if the page is managed. 2048 */ 2049 if ((pvo->pvo_vaddr & (PVO_MANAGED|PVO_FAKE)) == PVO_MANAGED) { 2050 struct vm_page *pg; 2051 2052 pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN); 2053 if (pg != NULL) { 2054 moea_attr_save(pg, pvo->pvo_pte.pte.pte_lo & 2055 (PTE_REF | PTE_CHG)); 2056 } 2057 } 2058 2059 /* 2060 * Remove this PVO from the PV list. 2061 */ 2062 LIST_REMOVE(pvo, pvo_vlink); 2063 2064 /* 2065 * Remove this from the overflow list and return it to the pool 2066 * if we aren't going to reuse it. 2067 */ 2068 LIST_REMOVE(pvo, pvo_olink); 2069 if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP)) 2070 uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? moea_mpvo_zone : 2071 moea_upvo_zone, pvo); 2072 moea_pvo_entries--; 2073 moea_pvo_remove_calls++; 2074 } 2075 2076 static __inline int 2077 moea_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx) 2078 { 2079 int pteidx; 2080 2081 /* 2082 * We can find the actual pte entry without searching by grabbing 2083 * the PTEG index from 3 unused bits in pte_lo[11:9] and by 2084 * noticing the HID bit. 2085 */ 2086 pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo); 2087 if (pvo->pvo_pte.pte.pte_hi & PTE_HID) 2088 pteidx ^= moea_pteg_mask * 8; 2089 2090 return (pteidx); 2091 } 2092 2093 static struct pvo_entry * 2094 moea_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p) 2095 { 2096 struct pvo_entry *pvo; 2097 int ptegidx; 2098 u_int sr; 2099 2100 va &= ~ADDR_POFF; 2101 sr = va_to_sr(pm->pm_sr, va); 2102 ptegidx = va_to_pteg(sr, va); 2103 2104 mtx_lock(&moea_table_mutex); 2105 LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) { 2106 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 2107 if (pteidx_p) 2108 *pteidx_p = moea_pvo_pte_index(pvo, ptegidx); 2109 break; 2110 } 2111 } 2112 mtx_unlock(&moea_table_mutex); 2113 2114 return (pvo); 2115 } 2116 2117 static struct pte * 2118 moea_pvo_to_pte(const struct pvo_entry *pvo, int pteidx) 2119 { 2120 struct pte *pt; 2121 2122 /* 2123 * If we haven't been supplied the ptegidx, calculate it. 2124 */ 2125 if (pteidx == -1) { 2126 int ptegidx; 2127 u_int sr; 2128 2129 sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr); 2130 ptegidx = va_to_pteg(sr, pvo->pvo_vaddr); 2131 pteidx = moea_pvo_pte_index(pvo, ptegidx); 2132 } 2133 2134 pt = &moea_pteg_table[pteidx >> 3].pt[pteidx & 7]; 2135 mtx_lock(&moea_table_mutex); 2136 2137 if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) { 2138 panic("moea_pvo_to_pte: pvo %p has valid pte in pvo but no " 2139 "valid pte index", pvo); 2140 } 2141 2142 if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) { 2143 panic("moea_pvo_to_pte: pvo %p has valid pte index in pvo " 2144 "pvo but no valid pte", pvo); 2145 } 2146 2147 if ((pt->pte_hi ^ (pvo->pvo_pte.pte.pte_hi & ~PTE_VALID)) == PTE_VALID) { 2148 if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0) { 2149 panic("moea_pvo_to_pte: pvo %p has valid pte in " 2150 "moea_pteg_table %p but invalid in pvo", pvo, pt); 2151 } 2152 2153 if (((pt->pte_lo ^ pvo->pvo_pte.pte.pte_lo) & ~(PTE_CHG|PTE_REF)) 2154 != 0) { 2155 panic("moea_pvo_to_pte: pvo %p pte does not match " 2156 "pte %p in moea_pteg_table", pvo, pt); 2157 } 2158 2159 mtx_assert(&moea_table_mutex, MA_OWNED); 2160 return (pt); 2161 } 2162 2163 if (pvo->pvo_pte.pte.pte_hi & PTE_VALID) { 2164 panic("moea_pvo_to_pte: pvo %p has invalid pte %p in " 2165 "moea_pteg_table but valid in pvo", pvo, pt); 2166 } 2167 2168 mtx_unlock(&moea_table_mutex); 2169 return (NULL); 2170 } 2171 2172 /* 2173 * XXX: THIS STUFF SHOULD BE IN pte.c? 2174 */ 2175 int 2176 moea_pte_spill(vm_offset_t addr) 2177 { 2178 struct pvo_entry *source_pvo, *victim_pvo; 2179 struct pvo_entry *pvo; 2180 int ptegidx, i, j; 2181 u_int sr; 2182 struct pteg *pteg; 2183 struct pte *pt; 2184 2185 moea_pte_spills++; 2186 2187 sr = mfsrin(addr); 2188 ptegidx = va_to_pteg(sr, addr); 2189 2190 /* 2191 * Have to substitute some entry. Use the primary hash for this. 2192 * Use low bits of timebase as random generator. 2193 */ 2194 pteg = &moea_pteg_table[ptegidx]; 2195 mtx_lock(&moea_table_mutex); 2196 __asm __volatile("mftb %0" : "=r"(i)); 2197 i &= 7; 2198 pt = &pteg->pt[i]; 2199 2200 source_pvo = NULL; 2201 victim_pvo = NULL; 2202 LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) { 2203 /* 2204 * We need to find a pvo entry for this address. 2205 */ 2206 MOEA_PVO_CHECK(pvo); 2207 if (source_pvo == NULL && 2208 moea_pte_match(&pvo->pvo_pte.pte, sr, addr, 2209 pvo->pvo_pte.pte.pte_hi & PTE_HID)) { 2210 /* 2211 * Now found an entry to be spilled into the pteg. 2212 * The PTE is now valid, so we know it's active. 2213 */ 2214 j = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte); 2215 2216 if (j >= 0) { 2217 PVO_PTEGIDX_SET(pvo, j); 2218 moea_pte_overflow--; 2219 MOEA_PVO_CHECK(pvo); 2220 mtx_unlock(&moea_table_mutex); 2221 return (1); 2222 } 2223 2224 source_pvo = pvo; 2225 2226 if (victim_pvo != NULL) 2227 break; 2228 } 2229 2230 /* 2231 * We also need the pvo entry of the victim we are replacing 2232 * so save the R & C bits of the PTE. 2233 */ 2234 if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL && 2235 moea_pte_compare(pt, &pvo->pvo_pte.pte)) { 2236 victim_pvo = pvo; 2237 if (source_pvo != NULL) 2238 break; 2239 } 2240 } 2241 2242 if (source_pvo == NULL) { 2243 mtx_unlock(&moea_table_mutex); 2244 return (0); 2245 } 2246 2247 if (victim_pvo == NULL) { 2248 if ((pt->pte_hi & PTE_HID) == 0) 2249 panic("moea_pte_spill: victim p-pte (%p) has no pvo" 2250 "entry", pt); 2251 2252 /* 2253 * If this is a secondary PTE, we need to search it's primary 2254 * pvo bucket for the matching PVO. 2255 */ 2256 LIST_FOREACH(pvo, &moea_pvo_table[ptegidx ^ moea_pteg_mask], 2257 pvo_olink) { 2258 MOEA_PVO_CHECK(pvo); 2259 /* 2260 * We also need the pvo entry of the victim we are 2261 * replacing so save the R & C bits of the PTE. 2262 */ 2263 if (moea_pte_compare(pt, &pvo->pvo_pte.pte)) { 2264 victim_pvo = pvo; 2265 break; 2266 } 2267 } 2268 2269 if (victim_pvo == NULL) 2270 panic("moea_pte_spill: victim s-pte (%p) has no pvo" 2271 "entry", pt); 2272 } 2273 2274 /* 2275 * We are invalidating the TLB entry for the EA we are replacing even 2276 * though it's valid. If we don't, we lose any ref/chg bit changes 2277 * contained in the TLB entry. 2278 */ 2279 source_pvo->pvo_pte.pte.pte_hi &= ~PTE_HID; 2280 2281 moea_pte_unset(pt, &victim_pvo->pvo_pte.pte, victim_pvo->pvo_vaddr); 2282 moea_pte_set(pt, &source_pvo->pvo_pte.pte); 2283 2284 PVO_PTEGIDX_CLR(victim_pvo); 2285 PVO_PTEGIDX_SET(source_pvo, i); 2286 moea_pte_replacements++; 2287 2288 MOEA_PVO_CHECK(victim_pvo); 2289 MOEA_PVO_CHECK(source_pvo); 2290 2291 mtx_unlock(&moea_table_mutex); 2292 return (1); 2293 } 2294 2295 static int 2296 moea_pte_insert(u_int ptegidx, struct pte *pvo_pt) 2297 { 2298 struct pte *pt; 2299 int i; 2300 2301 mtx_assert(&moea_table_mutex, MA_OWNED); 2302 2303 /* 2304 * First try primary hash. 2305 */ 2306 for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 2307 if ((pt->pte_hi & PTE_VALID) == 0) { 2308 pvo_pt->pte_hi &= ~PTE_HID; 2309 moea_pte_set(pt, pvo_pt); 2310 return (i); 2311 } 2312 } 2313 2314 /* 2315 * Now try secondary hash. 2316 */ 2317 ptegidx ^= moea_pteg_mask; 2318 2319 for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 2320 if ((pt->pte_hi & PTE_VALID) == 0) { 2321 pvo_pt->pte_hi |= PTE_HID; 2322 moea_pte_set(pt, pvo_pt); 2323 return (i); 2324 } 2325 } 2326 2327 panic("moea_pte_insert: overflow"); 2328 return (-1); 2329 } 2330 2331 static boolean_t 2332 moea_query_bit(vm_page_t m, int ptebit) 2333 { 2334 struct pvo_entry *pvo; 2335 struct pte *pt; 2336 2337 if (moea_attr_fetch(m) & ptebit) 2338 return (TRUE); 2339 2340 vm_page_lock_queues(); 2341 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2342 MOEA_PVO_CHECK(pvo); /* sanity check */ 2343 2344 /* 2345 * See if we saved the bit off. If so, cache it and return 2346 * success. 2347 */ 2348 if (pvo->pvo_pte.pte.pte_lo & ptebit) { 2349 moea_attr_save(m, ptebit); 2350 MOEA_PVO_CHECK(pvo); /* sanity check */ 2351 vm_page_unlock_queues(); 2352 return (TRUE); 2353 } 2354 } 2355 2356 /* 2357 * No luck, now go through the hard part of looking at the PTEs 2358 * themselves. Sync so that any pending REF/CHG bits are flushed to 2359 * the PTEs. 2360 */ 2361 powerpc_sync(); 2362 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2363 MOEA_PVO_CHECK(pvo); /* sanity check */ 2364 2365 /* 2366 * See if this pvo has a valid PTE. if so, fetch the 2367 * REF/CHG bits from the valid PTE. If the appropriate 2368 * ptebit is set, cache it and return success. 2369 */ 2370 pt = moea_pvo_to_pte(pvo, -1); 2371 if (pt != NULL) { 2372 moea_pte_synch(pt, &pvo->pvo_pte.pte); 2373 mtx_unlock(&moea_table_mutex); 2374 if (pvo->pvo_pte.pte.pte_lo & ptebit) { 2375 moea_attr_save(m, ptebit); 2376 MOEA_PVO_CHECK(pvo); /* sanity check */ 2377 vm_page_unlock_queues(); 2378 return (TRUE); 2379 } 2380 } 2381 } 2382 2383 vm_page_unlock_queues(); 2384 return (FALSE); 2385 } 2386 2387 static u_int 2388 moea_clear_bit(vm_page_t m, int ptebit) 2389 { 2390 u_int count; 2391 struct pvo_entry *pvo; 2392 struct pte *pt; 2393 2394 vm_page_lock_queues(); 2395 2396 /* 2397 * Clear the cached value. 2398 */ 2399 moea_attr_clear(m, ptebit); 2400 2401 /* 2402 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so 2403 * we can reset the right ones). note that since the pvo entries and 2404 * list heads are accessed via BAT0 and are never placed in the page 2405 * table, we don't have to worry about further accesses setting the 2406 * REF/CHG bits. 2407 */ 2408 powerpc_sync(); 2409 2410 /* 2411 * For each pvo entry, clear the pvo's ptebit. If this pvo has a 2412 * valid pte clear the ptebit from the valid pte. 2413 */ 2414 count = 0; 2415 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2416 MOEA_PVO_CHECK(pvo); /* sanity check */ 2417 pt = moea_pvo_to_pte(pvo, -1); 2418 if (pt != NULL) { 2419 moea_pte_synch(pt, &pvo->pvo_pte.pte); 2420 if (pvo->pvo_pte.pte.pte_lo & ptebit) { 2421 count++; 2422 moea_pte_clear(pt, PVO_VADDR(pvo), ptebit); 2423 } 2424 mtx_unlock(&moea_table_mutex); 2425 } 2426 pvo->pvo_pte.pte.pte_lo &= ~ptebit; 2427 MOEA_PVO_CHECK(pvo); /* sanity check */ 2428 } 2429 2430 vm_page_unlock_queues(); 2431 return (count); 2432 } 2433 2434 /* 2435 * Return true if the physical range is encompassed by the battable[idx] 2436 */ 2437 static int 2438 moea_bat_mapped(int idx, vm_offset_t pa, vm_size_t size) 2439 { 2440 u_int prot; 2441 u_int32_t start; 2442 u_int32_t end; 2443 u_int32_t bat_ble; 2444 2445 /* 2446 * Return immediately if not a valid mapping 2447 */ 2448 if (!(battable[idx].batu & BAT_Vs)) 2449 return (EINVAL); 2450 2451 /* 2452 * The BAT entry must be cache-inhibited, guarded, and r/w 2453 * so it can function as an i/o page 2454 */ 2455 prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW); 2456 if (prot != (BAT_I|BAT_G|BAT_PP_RW)) 2457 return (EPERM); 2458 2459 /* 2460 * The address should be within the BAT range. Assume that the 2461 * start address in the BAT has the correct alignment (thus 2462 * not requiring masking) 2463 */ 2464 start = battable[idx].batl & BAT_PBS; 2465 bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03; 2466 end = start | (bat_ble << 15) | 0x7fff; 2467 2468 if ((pa < start) || ((pa + size) > end)) 2469 return (ERANGE); 2470 2471 return (0); 2472 } 2473 2474 boolean_t 2475 moea_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2476 { 2477 int i; 2478 2479 /* 2480 * This currently does not work for entries that 2481 * overlap 256M BAT segments. 2482 */ 2483 2484 for(i = 0; i < 16; i++) 2485 if (moea_bat_mapped(i, pa, size) == 0) 2486 return (0); 2487 2488 return (EFAULT); 2489 } 2490 2491 /* 2492 * Map a set of physical memory pages into the kernel virtual 2493 * address space. Return a pointer to where it is mapped. This 2494 * routine is intended to be used for mapping device memory, 2495 * NOT real memory. 2496 */ 2497 void * 2498 moea_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2499 { 2500 2501 return (moea_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT)); 2502 } 2503 2504 void * 2505 moea_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma) 2506 { 2507 vm_offset_t va, tmpva, ppa, offset; 2508 int i; 2509 2510 ppa = trunc_page(pa); 2511 offset = pa & PAGE_MASK; 2512 size = roundup(offset + size, PAGE_SIZE); 2513 2514 /* 2515 * If the physical address lies within a valid BAT table entry, 2516 * return the 1:1 mapping. This currently doesn't work 2517 * for regions that overlap 256M BAT segments. 2518 */ 2519 for (i = 0; i < 16; i++) { 2520 if (moea_bat_mapped(i, pa, size) == 0) 2521 return ((void *) pa); 2522 } 2523 2524 va = kmem_alloc_nofault(kernel_map, size); 2525 if (!va) 2526 panic("moea_mapdev: Couldn't alloc kernel virtual memory"); 2527 2528 for (tmpva = va; size > 0;) { 2529 moea_kenter_attr(mmu, tmpva, ppa, ma); 2530 tlbie(tmpva); 2531 size -= PAGE_SIZE; 2532 tmpva += PAGE_SIZE; 2533 ppa += PAGE_SIZE; 2534 } 2535 2536 return ((void *)(va + offset)); 2537 } 2538 2539 void 2540 moea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) 2541 { 2542 vm_offset_t base, offset; 2543 2544 /* 2545 * If this is outside kernel virtual space, then it's a 2546 * battable entry and doesn't require unmapping 2547 */ 2548 if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= virtual_end)) { 2549 base = trunc_page(va); 2550 offset = va & PAGE_MASK; 2551 size = roundup(offset + size, PAGE_SIZE); 2552 kmem_free(kernel_map, base, size); 2553 } 2554 } 2555 2556 static void 2557 moea_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz) 2558 { 2559 struct pvo_entry *pvo; 2560 vm_offset_t lim; 2561 vm_paddr_t pa; 2562 vm_size_t len; 2563 2564 PMAP_LOCK(pm); 2565 while (sz > 0) { 2566 lim = round_page(va); 2567 len = MIN(lim - va, sz); 2568 pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 2569 if (pvo != NULL) { 2570 pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | 2571 (va & ADDR_POFF); 2572 moea_syncicache(pa, len); 2573 } 2574 va += len; 2575 sz -= len; 2576 } 2577 PMAP_UNLOCK(pm); 2578 } 2579