/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2020 Justin Hibbits
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Some hw specific parts of this pmap were derived or influenced
 * by NetBSD's ibm4xx pmap module. More generic code is shared with
 * a few other pmap modules from the FreeBSD tree.
 */

/*
 * VM layout notes:
 *
 * Kernel and user threads run within one common virtual address space
 * defined by AS=0.
 *
 * 32-bit pmap:
 * Virtual address space layout:
 * -----------------------------
 * 0x0000_0000 - 0x7fff_ffff	: user process
 * 0x8000_0000 - 0xbfff_ffff	: pmap_mapdev()-ed area (PCI/PCIE etc.)
 * 0xc000_0000 - 0xffff_efff	: KVA
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/uma.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/platform.h>

#include <machine/tlb.h>
#include <machine/spr.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/pte.h>

#include <ddb/ddb.h>

#include "mmu_if.h"

#define	PRI0ptrX	"08x"
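
/*
 * Illustrative sketch (not part of the pmap API): this pmap uses a
 * two-level page table.  A virtual address is decomposed with the
 * PDIR_IDX() and PTBL_IDX() macros from <machine/pte.h>, so a lookup
 * under the pmap lock amounts to:
 *
 *	pte_t **pdir = pmap->pm_pdir;
 *	pte_t *ptbl = pdir[PDIR_IDX(va)];	// first level, may be NULL
 *	pte_t *pte = &ptbl[PTBL_IDX(va)];	// second level
 *
 * pte_find() below implements exactly this walk.
 */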

/* Reserved KVA space and mutex for mmu_booke_zero_page. */
static vm_offset_t zero_page_va;
static struct mtx zero_page_mutex;

/* Reserved KVA space and mutex for mmu_booke_copy_page. */
static vm_offset_t copy_page_src_va;
static vm_offset_t copy_page_dst_va;
static struct mtx copy_page_mutex;

static vm_offset_t kernel_ptbl_root;
static unsigned int kernel_ptbls;	/* Number of KVA ptbls. */

/**************************************************************************/
/* PMAP */
/**************************************************************************/

#define	VM_MAPDEV_BASE	((vm_offset_t)VM_MAXUSER_ADDRESS + PAGE_SIZE)

static void tid_flush(tlbtid_t tid);
static unsigned long ilog2(unsigned long);

/**************************************************************************/
/* Page table management */
/**************************************************************************/

#define	PMAP_ROOT_SIZE	(sizeof(pte_t **) * PDIR_NENTRIES)
static void ptbl_init(void);
static struct ptbl_buf *ptbl_buf_alloc(void);
static void ptbl_buf_free(struct ptbl_buf *);
static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);

static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int, boolean_t);
static void ptbl_free(mmu_t, pmap_t, unsigned int);
static void ptbl_hold(mmu_t, pmap_t, unsigned int);
static int ptbl_unhold(mmu_t, pmap_t, unsigned int);

static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t,
    boolean_t);
static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);

struct ptbl_buf {
	TAILQ_ENTRY(ptbl_buf) link;	/* list link */
	vm_offset_t kva;		/* va of mapping */
};

/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
#define	PTBL_BUFS	(128 * 16)

/* ptbl free list and a lock used for access synchronization. */
static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
static struct mtx ptbl_buf_freelist_lock;

/* Base address of kva space allocated for ptbl bufs. */
static vm_offset_t ptbl_buf_pool_vabase;

/* Pointer to ptbl_buf structures. */
static struct ptbl_buf *ptbl_bufs;

/**************************************************************************/
/* Page table related */
/**************************************************************************/

/* Initialize pool of kva ptbl buffers. */
static void
ptbl_init(void)
{
	int i;

	CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
	    (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
	CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
	    __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);

	mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
	TAILQ_INIT(&ptbl_buf_freelist);

	for (i = 0; i < PTBL_BUFS; i++) {
		ptbl_bufs[i].kva =
		    ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
	}
}
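
/*
 * Sizing sketch (illustrative): the pool above reserves
 * PTBL_BUFS * PTBL_PAGES * PAGE_SIZE bytes of KVA, e.g.:
 *
 *	vm_size_t pool_sz = (vm_size_t)PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
 *
 * With the 2048 buffers defined here, 4 KB pages, and a hypothetical
 * PTBL_PAGES of 2, that is 2048 * 2 * 4096 = 16 MB.  The exact figure
 * depends on the PTBL_PAGES and PAGE_SIZE values in <machine/pte.h> and
 * <machine/param.h>.
 */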

/* Get a ptbl_buf from the freelist. */
static struct ptbl_buf *
ptbl_buf_alloc(void)
{
	struct ptbl_buf *buf;

	mtx_lock(&ptbl_buf_freelist_lock);
	buf = TAILQ_FIRST(&ptbl_buf_freelist);
	if (buf != NULL)
		TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	return (buf);
}

/* Return ptbl buf to free pool. */
static void
ptbl_buf_free(struct ptbl_buf *buf)
{

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	mtx_lock(&ptbl_buf_freelist_lock);
	TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);
}

/*
 * Search the pmap's list of allocated ptbl bufs and free the buf that
 * maps the given ptbl.
 */
static void
ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
{
	struct ptbl_buf *pbuf;

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
		if (pbuf->kva == (vm_offset_t)ptbl) {
			/* Remove from pmap ptbl buf list. */
			TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);

			/* Free corresponding ptbl buf. */
			ptbl_buf_free(pbuf);
			break;
		}
}

/* Allocate page table. */
static pte_t *
ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
{
	vm_page_t mtbl[PTBL_PAGES];
	vm_page_t m;
	struct ptbl_buf *pbuf;
	unsigned int pidx;
	pte_t *ptbl;
	int i, j;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_alloc: invalid pdir_idx"));
	KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
	    ("ptbl_alloc: valid ptbl entry exists!"));

	pbuf = ptbl_buf_alloc();
	if (pbuf == NULL)
		panic("ptbl_alloc: couldn't alloc kernel virtual memory");

	ptbl = (pte_t *)pbuf->kva;

	CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);

	for (i = 0; i < PTBL_PAGES; i++) {
		pidx = (PTBL_PAGES * pdir_idx) + i;
		while ((m = vm_page_alloc(NULL, pidx,
		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
			if (nosleep) {
				ptbl_free_pmap_ptbl(pmap, ptbl);
				for (j = 0; j < i; j++)
					vm_page_free(mtbl[j]);
				vm_wire_sub(i);
				return (NULL);
			}
			PMAP_UNLOCK(pmap);
			rw_wunlock(&pvh_global_lock);
			vm_wait(NULL);
			rw_wlock(&pvh_global_lock);
			PMAP_LOCK(pmap);
		}
		mtbl[i] = m;
	}

	/* Map allocated pages into kernel_pmap. */
	mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);

	/* Zero whole ptbl. */
	bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);

	/* Add pbuf to the pmap ptbl bufs list. */
	TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);

	return (ptbl);
}
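
/*
 * Note on the allocation loop above: when nosleep is FALSE and no page is
 * available, both the pmap lock and the global pv list lock are dropped
 * around vm_wait() so the pagedaemon can make progress, then reacquired
 * before retrying.  A hypothetical nosleep caller must therefore be
 * prepared for a NULL return, e.g.:
 *
 *	if ((ptbl = ptbl_alloc(mmu, pmap, pdir_idx, TRUE)) == NULL)
 *		return (ENOMEM);	// retry later, as pte_enter() does
 */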

/* Free ptbl pages and invalidate pdir entry. */
static void
ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_offset_t va;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_free: invalid pdir_idx"));

	ptbl = pmap->pm_pdir[pdir_idx];

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));

	/*
	 * Invalidate the pdir entry as soon as possible, so that other CPUs
	 * don't attempt to look up the page tables we are releasing.
	 */
	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	pmap->pm_pdir[pdir_idx] = NULL;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	for (i = 0; i < PTBL_PAGES; i++) {
		va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
		pa = pte_vatopa(mmu, kernel_pmap, va);
		m = PHYS_TO_VM_PAGE(pa);
		vm_page_free_zero(m);
		vm_wire_sub(1);
		mmu_booke_kremove(mmu, va);
	}

	ptbl_free_pmap_ptbl(pmap, ptbl);
}

/*
 * Decrement ptbl pages hold count and attempt to free ptbl pages.
 * Called when removing pte entry from ptbl.
 *
 * Return 1 if ptbl pages were freed.
 */
static int
ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_unhold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_unhold: unholding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	//debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
	KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
	    ("ptbl_unhold: non kva ptbl"));

	/* decrement hold count */
	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(mmu, kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->ref_count--;
	}

	/*
	 * Free ptbl pages if there are no pte entries in this ptbl.
	 * ref_count has the same value for all ptbl pages, so check the last
	 * page.
	 */
	if (m->ref_count == 0) {
		ptbl_free(mmu, pmap, pdir_idx);

		//debugf("ptbl_unhold: e (freed ptbl)\n");
		return (1);
	}

	return (0);
}

/*
 * Increment hold count for ptbl pages. This routine is used when a new pte
 * entry is being inserted into the ptbl.
 */
static void
ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	vm_paddr_t pa;
	pte_t *ptbl;
	vm_page_t m;
	int i;

	CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
	    pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_hold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_hold: holding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));

	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(mmu, kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->ref_count++;
	}
}
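
/*
 * Hold count invariant (a reading of the code above, not a stated API
 * contract): each ptbl page's ref_count tracks the number of valid PTEs
 * in the ptbl.  ptbl_alloc() wires every page (ref_count = 1, which
 * stands in for the first PTE, since pte_enter() skips ptbl_hold() on the
 * allocating path), each later insertion into an existing ptbl adds 1 via
 * ptbl_hold(), and each removal subtracts 1 via ptbl_unhold().  A user
 * ptbl holding n valid PTEs thus has ref_count == n on all of its pages,
 * and removing the last PTE drives ref_count to 0 and frees the ptbl.
 */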

/*
 * Clean the pte entry and, if requested, try to free the page table page.
 *
 * Return 1 if ptbl pages were freed, otherwise return 0.
 */
static int
pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	vm_page_t m;
	pte_t *ptbl;
	pte_t *pte;

	//int su = (pmap == kernel_pmap);
	//debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
	//		su, (u_int32_t)pmap, va, flags);

	ptbl = pmap->pm_pdir[pdir_idx];
	KASSERT(ptbl, ("pte_remove: null ptbl"));

	pte = &ptbl[ptbl_idx];

	if (pte == NULL || !PTE_ISVALID(pte))
		return (0);

	if (PTE_ISWIRED(pte))
		pmap->pm_stats.wired_count--;

	/* Get vm_page_t for mapped pte. */
	m = PHYS_TO_VM_PAGE(PTE_PA(pte));

	/* Handle managed entry. */
	if (PTE_ISMANAGED(pte)) {

		if (PTE_ISMODIFIED(pte))
			vm_page_dirty(m);

		if (PTE_ISREFERENCED(pte))
			vm_page_aflag_set(m, PGA_REFERENCED);

		pv_remove(pmap, va, m);
	} else if (pmap == kernel_pmap && m && m->md.pv_tracked) {
		/*
		 * Always pv_insert()/pv_remove() on MPC85XX, in case DPAA is
		 * used. This is needed by the NCSW support code for fast
		 * VA<->PA translation.
		 */
		pv_remove(pmap, va, m);
		if (TAILQ_EMPTY(&m->md.pv_list))
			m->md.pv_tracked = false;
	}

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	*pte = 0;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	pmap->pm_stats.resident_count--;

	if (flags & PTBL_UNHOLD) {
		//debugf("pte_remove: e (unhold)\n");
		return (ptbl_unhold(mmu, pmap, pdir_idx));
	}

	//debugf("pte_remove: e\n");
	return (0);
}

/*
 * Insert PTE for a given page and virtual address.
 */
static int
pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
    boolean_t nosleep)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	pte_t *ptbl, *pte, pte_tmp;

	CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
	    pmap == kernel_pmap, pmap, va);

	/* Get the page table pointer. */
	ptbl = pmap->pm_pdir[pdir_idx];

	if (ptbl == NULL) {
		/* Allocate page table pages. */
		ptbl = ptbl_alloc(mmu, pmap, pdir_idx, nosleep);
		if (ptbl == NULL) {
			KASSERT(nosleep, ("nosleep and NULL ptbl"));
			return (ENOMEM);
		}
		pmap->pm_pdir[pdir_idx] = ptbl;
		pte = &ptbl[ptbl_idx];
	} else {
		/*
		 * Check if there is a valid mapping for the requested va;
		 * if there is, remove it.
		 */
		pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
		if (PTE_ISVALID(pte)) {
			pte_remove(mmu, pmap, va, PTBL_HOLD);
		} else {
			/*
			 * pte is not used, increment hold count
			 * for ptbl pages.
			 */
			if (pmap != kernel_pmap)
				ptbl_hold(mmu, pmap, pdir_idx);
		}
	}

	/*
	 * Insert pv_entry into pv_list for mapped page if part of managed
	 * memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		flags |= PTE_MANAGED;

		/* Create and insert pv entry. */
		pv_insert(pmap, va, m);
	}

	pmap->pm_stats.resident_count++;

	pte_tmp = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
	pte_tmp |= (PTE_VALID | flags | PTE_PS_4KB); /* 4KB pages only */

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	*pte = pte_tmp;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);
	return (0);
}
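
/*
 * Illustrative (hypothetical) caller of the enter/remove pair, e.g. for a
 * read-only user mapping; callers in this pmap hold the pmap lock and the
 * global pv lock around these calls:
 *
 *	PMAP_LOCK(pmap);
 *	error = pte_enter(mmu, pmap, m, va, PTE_SR | PTE_UR, TRUE);
 *	...
 *	pte_remove(mmu, pmap, va, PTBL_UNHOLD);
 *	PMAP_UNLOCK(pmap);
 *
 * PTE_SR/PTE_UR are the supervisor/user read permission bits from
 * <machine/pte.h>; the exact flags depend on the mapping's protection.
 */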

/* Return the pa for the given pmap/va. */
static vm_paddr_t
pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa = 0;
	pte_t *pte;

	pte = pte_find(mmu, pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte))
		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
	return (pa);
}

/* Get a pointer to a PTE in a page table. */
static pte_t *
pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	if (pmap->pm_pdir[pdir_idx])
		return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));

	return (NULL);
}

/* Set up kernel page tables. */
static void
kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr)
{
	pte_t *pte;
	vm_offset_t va;
	vm_offset_t pdir_start;
	int i;

	kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;
	kernel_pmap->pm_pdir = (pte_t **)kernel_ptbl_root;

	pdir_start = kernel_ptbl_root + PDIR_NENTRIES * sizeof(pte_t *);

	/* Initialize kernel pdir */
	for (i = 0; i < kernel_ptbls; i++) {
		kernel_pmap->pm_pdir[kptbl_min + i] =
		    (pte_t *)(pdir_start + (i * PAGE_SIZE * PTBL_PAGES));
	}

	/*
	 * Fill in PTEs covering kernel code and data. They are not required
	 * for address translation, as this area is covered by static TLB1
	 * entries, but they are needed for pte_vatopa() to work correctly
	 * with kernel area addresses.
	 */
	for (va = addr; va < data_end; va += PAGE_SIZE) {
		pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
		powerpc_sync();
		*pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
		*pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
		    PTE_VALID | PTE_PS_4KB;
	}
}

static vm_offset_t
mmu_booke_alloc_kernel_pgtables(vm_offset_t data_end)
{
	/* Allocate space for ptbl_bufs. */
	ptbl_bufs = (struct ptbl_buf *)data_end;
	data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
	debugf(" ptbl_bufs at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
	    (uintptr_t)ptbl_bufs, data_end);

	data_end = round_page(data_end);

	kernel_ptbl_root = data_end;
	data_end += PDIR_NENTRIES * sizeof(pte_t *);

	/* Allocate PTE tables for kernel KVA. */
	kernel_ptbls = howmany(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
	    PDIR_SIZE);
	data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
	debugf(" kernel ptbls: %d\n", kernel_ptbls);
	debugf(" kernel pdir at %#jx end = %#jx\n",
	    (uintmax_t)kernel_ptbl_root, (uintmax_t)data_end);

	return (data_end);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
static void
mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
{
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
	    curthread->td_proc->p_pid, curthread->td_proc->p_comm);

	KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));

	for (i = 0; i < MAXCPU; i++)
		pmap->pm_tid[i] = TID_NONE;
	CPU_ZERO(&pmap->pm_active);
	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
	pmap->pm_pdir = uma_zalloc(ptbl_root_zone, M_WAITOK);
	bzero(pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
	TAILQ_INIT(&pmap->pm_ptbl_list);
}
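
/*
 * Sizing sketch (illustrative): with the 32-bit layout described at the
 * top of the file, KVA spans roughly 0xc000_0000 - 0xffff_efff, so
 * mmu_booke_alloc_kernel_pgtables() reserves about
 *
 *	howmany(0xfffff000 - 0xc0000000, PDIR_SIZE)
 *
 * page tables, each PTBL_PAGES * PAGE_SIZE bytes.  The exact count
 * depends on the VM_MIN/VM_MAX_KERNEL_ADDRESS and PDIR_SIZE values from
 * <machine/vmparam.h> and <machine/pte.h>.
 */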

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by mmu_booke_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
static void
mmu_booke_release(mmu_t mmu, pmap_t pmap)
{

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));
	uma_zfree(ptbl_root_zone, pmap->pm_pdir);
}

static void
mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	pte_t *pte;
	vm_paddr_t pa = 0;
	int sync_sz, valid;
	pmap_t pmap;
	vm_page_t m;
	vm_offset_t addr;
	int active;

	rw_wlock(&pvh_global_lock);
	pmap = PCPU_GET(curpmap);
	active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
	while (sz > 0) {
		PMAP_LOCK(pm);
		pte = pte_find(mmu, pm, va);
		valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
		if (valid)
			pa = PTE_PA(pte);
		PMAP_UNLOCK(pm);
		sync_sz = PAGE_SIZE - (va & PAGE_MASK);
		sync_sz = min(sync_sz, sz);
		if (valid) {
			if (!active) {
				/* Create a mapping in the active pmap. */
				addr = 0;
				m = PHYS_TO_VM_PAGE(pa);
				PMAP_LOCK(pmap);
				pte_enter(mmu, pmap, m, addr,
				    PTE_SR | PTE_VALID, FALSE);
				addr += (va & PAGE_MASK);
				__syncicache((void *)addr, sync_sz);
				pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
				PMAP_UNLOCK(pmap);
			} else
				__syncicache((void *)va, sync_sz);
		}
		va += sync_sz;
		sz -= sync_sz;
	}
	rw_wunlock(&pvh_global_lock);
}

/*
 * mmu_booke_zero_page_area zeros the specified hardware page by
 * mapping it into virtual memory and using bzero to clear
 * its contents.
 *
 * off and size must reside within a single page.
 */
static void
mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
	vm_offset_t va;

	/* XXX KASSERT off and size are within a single page? */

	mtx_lock(&zero_page_mutex);
	va = zero_page_va;

	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
	bzero((caddr_t)va + off, size);
	mmu_booke_kremove(mmu, va);

	mtx_unlock(&zero_page_mutex);
}

/*
 * mmu_booke_zero_page zeros the specified hardware page.
 */
static void
mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
{
	vm_offset_t off, va;

	va = zero_page_va;
	mtx_lock(&zero_page_mutex);

	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));

	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
		__asm __volatile("dcbz 0,%0" :: "r"(va + off));

	mmu_booke_kremove(mmu, va);

	mtx_unlock(&zero_page_mutex);
}
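
/*
 * Note on the dcbz loop above: each dcbz establishes and zeroes one data
 * cache line (cacheline_size bytes) without first reading it from memory,
 * so zeroing a page costs PAGE_SIZE / cacheline_size iterations, e.g.
 * with 4 KB pages and 32-byte lines:
 *
 *	4096 / 32 = 128 dcbz instructions
 *
 * which is why it is preferred over bzero() for whole pages.  dcbz
 * requires a cacheable mapping, which the kenter() above provides for
 * ordinary memory.
 */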

/*
 * mmu_booke_copy_page copies the specified (machine independent) page by
 * mapping the page into virtual memory and using memcpy to copy the page,
 * one machine dependent page at a time.
 */
static void
mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
{
	vm_offset_t sva, dva;

	sva = copy_page_src_va;
	dva = copy_page_dst_va;

	mtx_lock(&copy_page_mutex);
	mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
	mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));

	memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);

	mmu_booke_kremove(mmu, dva);
	mmu_booke_kremove(mmu, sva);
	mtx_unlock(&copy_page_mutex);
}

static inline void
mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	void *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	mtx_lock(&copy_page_mutex);
	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		mmu_booke_kenter(mmu, copy_page_src_va,
		    VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
		a_cp = (char *)copy_page_src_va + a_pg_offset;
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		mmu_booke_kenter(mmu, copy_page_dst_va,
		    VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
		b_cp = (char *)copy_page_dst_va + b_pg_offset;
		bcopy(a_cp, b_cp, cnt);
		mmu_booke_kremove(mmu, copy_page_dst_va);
		mmu_booke_kremove(mmu, copy_page_src_va);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
	mtx_unlock(&copy_page_mutex);
}

static vm_offset_t
mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
{
	vm_paddr_t paddr;
	vm_offset_t qaddr;
	uint32_t flags;
	pte_t *pte;

	paddr = VM_PAGE_TO_PHYS(m);

	flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
	flags |= tlb_calc_wimg(paddr, pmap_page_get_memattr(m)) << PTE_MAS2_SHIFT;
	flags |= PTE_PS_4KB;

	critical_enter();
	qaddr = PCPU_GET(qmap_addr);

	pte = pte_find(mmu, kernel_pmap, qaddr);

	KASSERT(*pte == 0, ("mmu_booke_quick_enter_page: PTE busy"));

	/*
	 * XXX: tlbivax is broadcast to other cores, but qaddr should
	 * not be present in other TLBs. Is there a better instruction
	 * sequence to use? Or just forget it & use mmu_booke_kenter()...
	 */
	__asm __volatile("tlbivax 0, %0" :: "r"(qaddr & MAS2_EPN_MASK));
	__asm __volatile("isync; msync");

	*pte = PTE_RPN_FROM_PA(paddr) | flags;

	/* Flush the real memory from the instruction cache. */
	if ((flags & (PTE_I | PTE_G)) == 0)
		__syncicache((void *)qaddr, PAGE_SIZE);

	return (qaddr);
}

static void
mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr)
{
	pte_t *pte;

	pte = pte_find(mmu, kernel_pmap, addr);

	KASSERT(PCPU_GET(qmap_addr) == addr,
	    ("mmu_booke_quick_remove_page: invalid address"));
	KASSERT(*pte != 0,
	    ("mmu_booke_quick_remove_page: PTE not in use"));

	*pte = 0;
	critical_exit();
}

/**************************************************************************/
/* TID handling */
/**************************************************************************/

/*
 * Return the largest value log such that 2^log <= num, i.e.
 * floor(log2(num)).
 */
static unsigned long
ilog2(unsigned long num)
{
	long lz;

	__asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
	return (31 - lz);
}
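
/*
 * Worked example: for num = 16 (0b1_0000), cntlzw counts 27 leading
 * zeroes in the 32-bit word, so ilog2(16) = 31 - 27 = 4.  ilog2(0) is
 * undefined here: cntlzw returns 32 and the subtraction yields -1.
 */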

/*
 * Invalidate all TLB0 entries which match the given TID. Note this is
 * dedicated for cases when invalidations should NOT be propagated to other
 * CPUs.
 */
static void
tid_flush(tlbtid_t tid)
{
	register_t msr;
	uint32_t mas0, mas1, mas2;
	int entry, way;

	/* Don't evict kernel translations */
	if (tid == TID_KERNEL)
		return;

	msr = mfmsr();
	__asm __volatile("wrteei 0");

	/*
	 * Newer cores (e500mc and later) have tlbilx, which doesn't
	 * broadcast, so use it for PID invalidation.
	 */
	switch ((mfpvr() >> 16) & 0xffff) {
	case FSL_E500mc:
	case FSL_E5500:
	case FSL_E6500:
		mtspr(SPR_MAS6, tid << MAS6_SPID0_SHIFT);
		/* tlbilxpid */
		__asm __volatile("isync; .long 0x7c200024; isync; msync");
		__asm __volatile("wrtee %0" :: "r"(msr));
		return;
	}

	for (way = 0; way < TLB0_WAYS; way++)
		for (entry = 0; entry < TLB0_ENTRIES_PER_WAY; entry++) {

			mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
			mtspr(SPR_MAS0, mas0);

			mas2 = entry << MAS2_TLB0_ENTRY_IDX_SHIFT;
			mtspr(SPR_MAS2, mas2);

			__asm __volatile("isync; tlbre");

			mas1 = mfspr(SPR_MAS1);

			if (!(mas1 & MAS1_VALID))
				continue;
			if (((mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT) != tid)
				continue;
			mas1 &= ~MAS1_VALID;
			mtspr(SPR_MAS1, mas1);
			__asm __volatile("isync; tlbwe; isync; msync");
		}
	__asm __volatile("wrtee %0" :: "r"(msr));
}
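
/*
 * Cost sketch (illustrative): the fallback loop above issues one tlbre
 * per TLB0 slot, i.e. TLB0_WAYS * TLB0_ENTRIES_PER_WAY reads with
 * interrupts disabled.  For a hypothetical 512-entry, 4-way TLB0 that is
 * 4 * 128 = 512 tlbre operations per flushed TID, which is why the
 * single-instruction tlbilxpid path is taken on cores that support it.
 */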