1 /* $NetBSD: pmap.c,v 1.65 2011/01/14 02:06:29 rmind Exp $ */ 2 3 /* 4 * Copyright 2001 Wasabi Systems, Inc. 5 * All rights reserved. 6 * 7 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed for the NetBSD Project by 20 * Wasabi Systems, Inc. 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 22 * or promote products derived from this software without specific prior 23 * written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38 /* 39 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 40 * Copyright (C) 1995, 1996 TooLs GmbH. 41 * All rights reserved. 42 * 43 * Redistribution and use in source and binary forms, with or without 44 * modification, are permitted provided that the following conditions 45 * are met: 46 * 1. Redistributions of source code must retain the above copyright 47 * notice, this list of conditions and the following disclaimer. 48 * 2. Redistributions in binary form must reproduce the above copyright 49 * notice, this list of conditions and the following disclaimer in the 50 * documentation and/or other materials provided with the distribution. 51 * 3. All advertising materials mentioning features or use of this software 52 * must display the following acknowledgement: 53 * This product includes software developed by TooLs GmbH. 54 * 4. The name of TooLs GmbH may not be used to endorse or promote products 55 * derived from this software without specific prior written permission. 56 * 57 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 58 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 59 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 60 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 61 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 62 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 63 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 64 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 65 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 66 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 67 */ 68 69 #include <sys/cdefs.h> 70 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.65 2011/01/14 02:06:29 rmind Exp $"); 71 72 #include <sys/param.h> 73 #include <sys/malloc.h> 74 #include <sys/proc.h> 75 #include <sys/queue.h> 76 #include <sys/systm.h> 77 #include <sys/pool.h> 78 #include <sys/device.h> 79 80 #include <uvm/uvm.h> 81 82 #include <machine/cpu.h> 83 #include <machine/pcb.h> 84 #include <machine/powerpc.h> 85 86 #include <powerpc/spr.h> 87 #include <powerpc/ibm4xx/spr.h> 88 #include <machine/tlb.h> 89 90 /* 91 * kernmap is an array of PTEs large enough to map in 92 * 4GB. At 16KB/page it is 256K entries or 2MB. 93 */ 94 #define KERNMAP_SIZE ((0xffffffffU/PAGE_SIZE)+1) 95 void *kernmap; 96 97 #define MINCTX 2 98 #define NUMCTX 256 99 100 volatile struct pmap *ctxbusy[NUMCTX]; 101 102 #define TLBF_USED 0x1 103 #define TLBF_REF 0x2 104 #define TLBF_LOCKED 0x4 105 #define TLB_LOCKED(i) (tlb_info[(i)].ti_flags & TLBF_LOCKED) 106 107 typedef struct tlb_info_s { 108 char ti_flags; 109 char ti_ctx; /* TLB_PID assiciated with the entry */ 110 u_int ti_va; 111 } tlb_info_t; 112 113 volatile tlb_info_t tlb_info[NTLB]; 114 /* We'll use a modified FIFO replacement policy cause it's cheap */ 115 volatile int tlbnext; 116 117 static int tlb_nreserved = 0; 118 static int pmap_bootstrap_done = 0; 119 120 /* Event counters */ 121 struct evcnt tlbmiss_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP, 122 NULL, "cpu", "tlbmiss"); 123 struct evcnt tlbhit_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP, 124 NULL, "cpu", "tlbhit"); 125 struct evcnt tlbflush_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP, 126 NULL, "cpu", "tlbflush"); 127 struct evcnt tlbenter_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP, 128 NULL, "cpu", "tlbenter"); 129 130 struct pmap kernel_pmap_; 131 struct pmap *const kernel_pmap_ptr = &kernel_pmap_; 132 133 static int npgs; 134 static u_int nextavail; 135 #ifndef MSGBUFADDR 136 extern paddr_t msgbuf_paddr; 137 #endif 138 139 static struct mem_region *mem, *avail; 140 141 /* 142 * This is a cache of referenced/modified bits. 143 * Bits herein are shifted by ATTRSHFT. 144 */ 145 static char *pmap_attrib; 146 147 #define PV_WIRED 0x1 148 #define PV_WIRE(pv) ((pv)->pv_va |= PV_WIRED) 149 #define PV_UNWIRE(pv) ((pv)->pv_va &= ~PV_WIRED) 150 #define PV_ISWIRED(pv) ((pv)->pv_va & PV_WIRED) 151 #define PV_CMPVA(va,pv) (!(((pv)->pv_va ^ (va)) & (~PV_WIRED))) 152 153 struct pv_entry { 154 struct pv_entry *pv_next; /* Linked list of mappings */ 155 vaddr_t pv_va; /* virtual address of mapping */ 156 struct pmap *pv_pm; 157 }; 158 159 /* Each index corresponds to TLB_SIZE_* value. */ 160 static size_t tlbsize[] = { 161 1024, /* TLB_SIZE_1K */ 162 4096, /* TLB_SIZE_4K */ 163 16384, /* TLB_SIZE_16K */ 164 65536, /* TLB_SIZE_64K */ 165 262144, /* TLB_SIZE_256K */ 166 1048576, /* TLB_SIZE_1M */ 167 4194304, /* TLB_SIZE_4M */ 168 16777216, /* TLB_SIZE_16M */ 169 }; 170 171 struct pv_entry *pv_table; 172 static struct pool pv_pool; 173 174 static int pmap_initialized; 175 176 static int ctx_flush(int); 177 178 inline struct pv_entry *pa_to_pv(paddr_t); 179 static inline char *pa_to_attr(paddr_t); 180 181 static inline volatile u_int *pte_find(struct pmap *, vaddr_t); 182 static inline int pte_enter(struct pmap *, vaddr_t, u_int); 183 184 static inline int pmap_enter_pv(struct pmap *, vaddr_t, paddr_t, int); 185 static void pmap_remove_pv(struct pmap *, vaddr_t, paddr_t); 186 187 static int ppc4xx_tlb_size_mask(size_t, int *, int *); 188 189 190 inline struct pv_entry * 191 pa_to_pv(paddr_t pa) 192 { 193 int bank, pg; 194 195 bank = vm_physseg_find(atop(pa), &pg); 196 if (bank == -1) 197 return NULL; 198 return &VM_PHYSMEM_PTR(bank)->pmseg.pvent[pg]; 199 } 200 201 static inline char * 202 pa_to_attr(paddr_t pa) 203 { 204 int bank, pg; 205 206 bank = vm_physseg_find(atop(pa), &pg); 207 if (bank == -1) 208 return NULL; 209 return &VM_PHYSMEM_PTR(bank)->pmseg.attrs[pg]; 210 } 211 212 /* 213 * Insert PTE into page table. 214 */ 215 int 216 pte_enter(struct pmap *pm, vaddr_t va, u_int pte) 217 { 218 int seg = STIDX(va); 219 int ptn = PTIDX(va); 220 u_int oldpte; 221 222 if (!pm->pm_ptbl[seg]) { 223 /* Don't allocate a page to clear a non-existent mapping. */ 224 if (!pte) 225 return (0); 226 /* Allocate a page XXXX this will sleep! */ 227 pm->pm_ptbl[seg] = 228 (uint *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0, 229 UVM_KMF_WIRED | UVM_KMF_ZERO); 230 } 231 oldpte = pm->pm_ptbl[seg][ptn]; 232 pm->pm_ptbl[seg][ptn] = pte; 233 234 /* Flush entry. */ 235 ppc4xx_tlb_flush(va, pm->pm_ctx); 236 if (oldpte != pte) { 237 if (pte == 0) 238 pm->pm_stats.resident_count--; 239 else 240 pm->pm_stats.resident_count++; 241 } 242 return (1); 243 } 244 245 /* 246 * Get a pointer to a PTE in a page table. 247 */ 248 volatile u_int * 249 pte_find(struct pmap *pm, vaddr_t va) 250 { 251 int seg = STIDX(va); 252 int ptn = PTIDX(va); 253 254 if (pm->pm_ptbl[seg]) 255 return (&pm->pm_ptbl[seg][ptn]); 256 257 return (NULL); 258 } 259 260 /* 261 * This is called during initppc, before the system is really initialized. 262 */ 263 void 264 pmap_bootstrap(u_int kernelstart, u_int kernelend) 265 { 266 struct mem_region *mp, *mp1; 267 int cnt, i; 268 u_int s, e, sz; 269 270 tlbnext = tlb_nreserved; 271 272 /* 273 * Allocate the kernel page table at the end of 274 * kernel space so it's in the locked TTE. 275 */ 276 kernmap = (void *)kernelend; 277 278 /* 279 * Initialize kernel page table. 280 */ 281 for (i = 0; i < STSZ; i++) { 282 pmap_kernel()->pm_ptbl[i] = 0; 283 } 284 ctxbusy[0] = ctxbusy[1] = pmap_kernel(); 285 286 /* 287 * Announce page-size to the VM-system 288 */ 289 uvmexp.pagesize = NBPG; 290 uvm_setpagesize(); 291 292 /* 293 * Get memory. 294 */ 295 mem_regions(&mem, &avail); 296 for (mp = mem; mp->size; mp++) { 297 physmem += btoc(mp->size); 298 printf("+%lx,",mp->size); 299 } 300 printf("\n"); 301 ppc4xx_tlb_init(); 302 /* 303 * Count the number of available entries. 304 */ 305 for (cnt = 0, mp = avail; mp->size; mp++) 306 cnt++; 307 308 /* 309 * Page align all regions. 310 * Non-page aligned memory isn't very interesting to us. 311 * Also, sort the entries for ascending addresses. 312 */ 313 kernelstart &= ~PGOFSET; 314 kernelend = (kernelend + PGOFSET) & ~PGOFSET; 315 for (mp = avail; mp->size; mp++) { 316 s = mp->start; 317 e = mp->start + mp->size; 318 printf("%08x-%08x -> ",s,e); 319 /* 320 * Check whether this region holds all of the kernel. 321 */ 322 if (s < kernelstart && e > kernelend) { 323 avail[cnt].start = kernelend; 324 avail[cnt++].size = e - kernelend; 325 e = kernelstart; 326 } 327 /* 328 * Look whether this regions starts within the kernel. 329 */ 330 if (s >= kernelstart && s < kernelend) { 331 if (e <= kernelend) 332 goto empty; 333 s = kernelend; 334 } 335 /* 336 * Now look whether this region ends within the kernel. 337 */ 338 if (e > kernelstart && e <= kernelend) { 339 if (s >= kernelstart) 340 goto empty; 341 e = kernelstart; 342 } 343 /* 344 * Now page align the start and size of the region. 345 */ 346 s = round_page(s); 347 e = trunc_page(e); 348 if (e < s) 349 e = s; 350 sz = e - s; 351 printf("%08x-%08x = %x\n",s,e,sz); 352 /* 353 * Check whether some memory is left here. 354 */ 355 if (sz == 0) { 356 empty: 357 memmove(mp, mp + 1, 358 (cnt - (mp - avail)) * sizeof *mp); 359 cnt--; 360 mp--; 361 continue; 362 } 363 /* 364 * Do an insertion sort. 365 */ 366 npgs += btoc(sz); 367 for (mp1 = avail; mp1 < mp; mp1++) 368 if (s < mp1->start) 369 break; 370 if (mp1 < mp) { 371 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1); 372 mp1->start = s; 373 mp1->size = sz; 374 } else { 375 mp->start = s; 376 mp->size = sz; 377 } 378 } 379 380 /* 381 * We cannot do pmap_steal_memory here, 382 * since we don't run with translation enabled yet. 383 */ 384 #ifndef MSGBUFADDR 385 /* 386 * allow for msgbuf 387 */ 388 sz = round_page(MSGBUFSIZE); 389 mp = NULL; 390 for (mp1 = avail; mp1->size; mp1++) 391 if (mp1->size >= sz) 392 mp = mp1; 393 if (mp == NULL) 394 panic("not enough memory?"); 395 396 npgs -= btoc(sz); 397 msgbuf_paddr = mp->start + mp->size - sz; 398 mp->size -= sz; 399 if (mp->size <= 0) 400 memmove(mp, mp + 1, (cnt - (mp - avail)) * sizeof *mp); 401 #endif 402 403 for (mp = avail; mp->size; mp++) 404 uvm_page_physload(atop(mp->start), atop(mp->start + mp->size), 405 atop(mp->start), atop(mp->start + mp->size), 406 VM_FREELIST_DEFAULT); 407 408 /* 409 * Initialize kernel pmap and hardware. 410 */ 411 /* Setup TLB pid allocator so it knows we alreadu using PID 1 */ 412 pmap_kernel()->pm_ctx = KERNEL_PID; 413 nextavail = avail->start; 414 415 evcnt_attach_static(&tlbmiss_ev); 416 evcnt_attach_static(&tlbhit_ev); 417 evcnt_attach_static(&tlbflush_ev); 418 evcnt_attach_static(&tlbenter_ev); 419 420 pmap_bootstrap_done = 1; 421 } 422 423 /* 424 * Restrict given range to physical memory 425 * 426 * (Used by /dev/mem) 427 */ 428 void 429 pmap_real_memory(paddr_t *start, psize_t *size) 430 { 431 struct mem_region *mp; 432 433 for (mp = mem; mp->size; mp++) { 434 if (*start + *size > mp->start && 435 *start < mp->start + mp->size) { 436 if (*start < mp->start) { 437 *size -= mp->start - *start; 438 *start = mp->start; 439 } 440 if (*start + *size > mp->start + mp->size) 441 *size = mp->start + mp->size - *start; 442 return; 443 } 444 } 445 *size = 0; 446 } 447 448 /* 449 * Initialize anything else for pmap handling. 450 * Called during vm_init(). 451 */ 452 void 453 pmap_init(void) 454 { 455 struct pv_entry *pv; 456 vsize_t sz; 457 vaddr_t addr; 458 int i, s; 459 int bank; 460 char *attr; 461 462 sz = (vsize_t)((sizeof(struct pv_entry) + 1) * npgs); 463 sz = round_page(sz); 464 addr = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED | UVM_KMF_ZERO); 465 s = splvm(); 466 pv = pv_table = (struct pv_entry *)addr; 467 for (i = npgs; --i >= 0;) 468 pv++->pv_pm = NULL; 469 pmap_attrib = (char *)pv; 470 memset(pv, 0, npgs); 471 472 pv = pv_table; 473 attr = pmap_attrib; 474 for (bank = 0; bank < vm_nphysseg; bank++) { 475 sz = VM_PHYSMEM_PTR(bank)->end - VM_PHYSMEM_PTR(bank)->start; 476 VM_PHYSMEM_PTR(bank)->pmseg.pvent = pv; 477 VM_PHYSMEM_PTR(bank)->pmseg.attrs = attr; 478 pv += sz; 479 attr += sz; 480 } 481 482 pmap_initialized = 1; 483 splx(s); 484 485 /* Setup a pool for additional pvlist structures */ 486 pool_init(&pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pv_entry", NULL, 487 IPL_VM); 488 } 489 490 /* 491 * How much virtual space is available to the kernel? 492 */ 493 void 494 pmap_virtual_space(vaddr_t *start, vaddr_t *end) 495 { 496 497 #if 0 498 /* 499 * Reserve one segment for kernel virtual memory 500 */ 501 *start = (vaddr_t)(KERNEL_SR << ADDR_SR_SHFT); 502 *end = *start + SEGMENT_LENGTH; 503 #else 504 *start = (vaddr_t) VM_MIN_KERNEL_ADDRESS; 505 *end = (vaddr_t) VM_MAX_KERNEL_ADDRESS; 506 #endif 507 } 508 509 #ifdef PMAP_GROWKERNEL 510 /* 511 * Preallocate kernel page tables to a specified VA. 512 * This simply loops through the first TTE for each 513 * page table from the beginning of the kernel pmap, 514 * reads the entry, and if the result is 515 * zero (either invalid entry or no page table) it stores 516 * a zero there, populating page tables in the process. 517 * This is not the most efficient technique but i don't 518 * expect it to be called that often. 519 */ 520 extern struct vm_page *vm_page_alloc1(void); 521 extern void vm_page_free1(struct vm_page *); 522 523 vaddr_t kbreak = VM_MIN_KERNEL_ADDRESS; 524 525 vaddr_t 526 pmap_growkernel(vaddr_t maxkvaddr) 527 { 528 int s; 529 int seg; 530 paddr_t pg; 531 struct pmap *pm = pmap_kernel(); 532 533 s = splvm(); 534 535 /* Align with the start of a page table */ 536 for (kbreak &= ~(PTMAP-1); kbreak < maxkvaddr; 537 kbreak += PTMAP) { 538 seg = STIDX(kbreak); 539 540 if (pte_find(pm, kbreak)) 541 continue; 542 543 if (uvm.page_init_done) { 544 pg = (paddr_t)VM_PAGE_TO_PHYS(vm_page_alloc1()); 545 } else { 546 if (!uvm_page_physget(&pg)) 547 panic("pmap_growkernel: no memory"); 548 } 549 if (!pg) 550 panic("pmap_growkernel: no pages"); 551 pmap_zero_page((paddr_t)pg); 552 553 /* XXX This is based on all phymem being addressable */ 554 pm->pm_ptbl[seg] = (u_int *)pg; 555 } 556 splx(s); 557 return (kbreak); 558 } 559 560 /* 561 * vm_page_alloc1: 562 * 563 * Allocate and return a memory cell with no associated object. 564 */ 565 struct vm_page * 566 vm_page_alloc1(void) 567 { 568 struct vm_page *pg; 569 570 pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE); 571 if (pg) { 572 pg->wire_count = 1; /* no mappings yet */ 573 pg->flags &= ~PG_BUSY; /* never busy */ 574 } 575 return pg; 576 } 577 578 /* 579 * vm_page_free1: 580 * 581 * Returns the given page to the free list, 582 * disassociating it with any VM object. 583 * 584 * Object and page must be locked prior to entry. 585 */ 586 void 587 vm_page_free1(struct vm_page *pg) 588 { 589 #ifdef DIAGNOSTIC 590 if (pg->flags != (PG_CLEAN|PG_FAKE)) { 591 printf("Freeing invalid page %p\n", pg); 592 printf("pa = %llx\n", (unsigned long long)VM_PAGE_TO_PHYS(pg)); 593 #ifdef DDB 594 Debugger(); 595 #endif 596 return; 597 } 598 #endif 599 pg->flags |= PG_BUSY; 600 pg->wire_count = 0; 601 uvm_pagefree(pg); 602 } 603 #endif 604 605 /* 606 * Create and return a physical map. 607 */ 608 struct pmap * 609 pmap_create(void) 610 { 611 struct pmap *pm; 612 613 pm = malloc(sizeof *pm, M_VMPMAP, M_WAITOK); 614 memset(pm, 0, sizeof *pm); 615 pm->pm_refs = 1; 616 return pm; 617 } 618 619 /* 620 * Add a reference to the given pmap. 621 */ 622 void 623 pmap_reference(struct pmap *pm) 624 { 625 626 pm->pm_refs++; 627 } 628 629 /* 630 * Retire the given pmap from service. 631 * Should only be called if the map contains no valid mappings. 632 */ 633 void 634 pmap_destroy(struct pmap *pm) 635 { 636 int i; 637 638 if (--pm->pm_refs > 0) { 639 return; 640 } 641 KASSERT(pm->pm_stats.resident_count == 0); 642 KASSERT(pm->pm_stats.wired_count == 0); 643 for (i = 0; i < STSZ; i++) 644 if (pm->pm_ptbl[i]) { 645 uvm_km_free(kernel_map, (vaddr_t)pm->pm_ptbl[i], 646 PAGE_SIZE, UVM_KMF_WIRED); 647 pm->pm_ptbl[i] = NULL; 648 } 649 if (pm->pm_ctx) 650 ctx_free(pm); 651 free(pm, M_VMPMAP); 652 } 653 654 /* 655 * Copy the range specified by src_addr/len 656 * from the source map to the range dst_addr/len 657 * in the destination map. 658 * 659 * This routine is only advisory and need not do anything. 660 */ 661 void 662 pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vaddr_t dst_addr, 663 vsize_t len, vaddr_t src_addr) 664 { 665 } 666 667 /* 668 * Require that all active physical maps contain no 669 * incorrect entries NOW. 670 */ 671 void 672 pmap_update(struct pmap *pmap) 673 { 674 } 675 676 /* 677 * Fill the given physical page with zeroes. 678 */ 679 void 680 pmap_zero_page(paddr_t pa) 681 { 682 683 #ifdef PPC_4XX_NOCACHE 684 memset((void *)pa, 0, PAGE_SIZE); 685 #else 686 int i; 687 688 for (i = PAGE_SIZE/CACHELINESIZE; i > 0; i--) { 689 __asm volatile ("dcbz 0,%0" :: "r"(pa)); 690 pa += CACHELINESIZE; 691 } 692 #endif 693 } 694 695 /* 696 * Copy the given physical source page to its destination. 697 */ 698 void 699 pmap_copy_page(paddr_t src, paddr_t dst) 700 { 701 702 memcpy((void *)dst, (void *)src, PAGE_SIZE); 703 dcache_flush_page(dst); 704 } 705 706 /* 707 * This returns != 0 on success. 708 */ 709 static inline int 710 pmap_enter_pv(struct pmap *pm, vaddr_t va, paddr_t pa, int flags) 711 { 712 struct pv_entry *pv, *npv = NULL; 713 int s; 714 715 if (!pmap_initialized) 716 return 0; 717 718 s = splvm(); 719 pv = pa_to_pv(pa); 720 if (!pv->pv_pm) { 721 /* 722 * No entries yet, use header as the first entry. 723 */ 724 pv->pv_va = va; 725 pv->pv_pm = pm; 726 pv->pv_next = NULL; 727 } else { 728 /* 729 * There is at least one other VA mapping this page. 730 * Place this entry after the header. 731 */ 732 npv = pool_get(&pv_pool, PR_NOWAIT); 733 if (npv == NULL) { 734 if ((flags & PMAP_CANFAIL) == 0) 735 panic("pmap_enter_pv: failed"); 736 splx(s); 737 return 0; 738 } 739 npv->pv_va = va; 740 npv->pv_pm = pm; 741 npv->pv_next = pv->pv_next; 742 pv->pv_next = npv; 743 pv = npv; 744 } 745 if (flags & PMAP_WIRED) { 746 PV_WIRE(pv); 747 pm->pm_stats.wired_count++; 748 } 749 splx(s); 750 return (1); 751 } 752 753 static void 754 pmap_remove_pv(struct pmap *pm, vaddr_t va, paddr_t pa) 755 { 756 struct pv_entry *pv, *npv; 757 758 /* 759 * Remove from the PV table. 760 */ 761 pv = pa_to_pv(pa); 762 if (!pv) 763 return; 764 765 /* 766 * If it is the first entry on the list, it is actually 767 * in the header and we must copy the following entry up 768 * to the header. Otherwise we must search the list for 769 * the entry. In either case we free the now unused entry. 770 */ 771 if (pm == pv->pv_pm && PV_CMPVA(va, pv)) { 772 if (PV_ISWIRED(pv)) { 773 pm->pm_stats.wired_count--; 774 } 775 if ((npv = pv->pv_next)) { 776 *pv = *npv; 777 pool_put(&pv_pool, npv); 778 } else 779 pv->pv_pm = NULL; 780 } else { 781 for (; (npv = pv->pv_next) != NULL; pv = npv) 782 if (pm == npv->pv_pm && PV_CMPVA(va, npv)) 783 break; 784 if (npv) { 785 pv->pv_next = npv->pv_next; 786 if (PV_ISWIRED(npv)) { 787 pm->pm_stats.wired_count--; 788 } 789 pool_put(&pv_pool, npv); 790 } 791 } 792 } 793 794 /* 795 * Insert physical page at pa into the given pmap at virtual address va. 796 */ 797 int 798 pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 799 { 800 int s; 801 u_int tte; 802 bool managed; 803 804 /* 805 * Have to remove any existing mapping first. 806 */ 807 pmap_remove(pm, va, va + PAGE_SIZE); 808 809 if (flags & PMAP_WIRED) 810 flags |= prot; 811 812 managed = uvm_pageismanaged(pa); 813 814 /* 815 * Generate TTE. 816 */ 817 tte = TTE_PA(pa); 818 /* XXXX -- need to support multiple page sizes. */ 819 tte |= TTE_SZ_16K; 820 #ifdef DIAGNOSTIC 821 if ((flags & (PME_NOCACHE | PME_WRITETHROUG)) == 822 (PME_NOCACHE | PME_WRITETHROUG)) 823 panic("pmap_enter: uncached & writethrough"); 824 #endif 825 if (flags & PME_NOCACHE) 826 /* Must be I/O mapping */ 827 tte |= TTE_I | TTE_G; 828 #ifdef PPC_4XX_NOCACHE 829 tte |= TTE_I; 830 #else 831 else if (flags & PME_WRITETHROUG) 832 /* Uncached and writethrough are not compatible */ 833 tte |= TTE_W; 834 #endif 835 if (pm == pmap_kernel()) 836 tte |= TTE_ZONE(ZONE_PRIV); 837 else 838 tte |= TTE_ZONE(ZONE_USER); 839 840 if (flags & VM_PROT_WRITE) 841 tte |= TTE_WR; 842 843 if (flags & VM_PROT_EXECUTE) 844 tte |= TTE_EX; 845 846 /* 847 * Now record mapping for later back-translation. 848 */ 849 if (pmap_initialized && managed) { 850 char *attr; 851 852 if (!pmap_enter_pv(pm, va, pa, flags)) { 853 /* Could not enter pv on a managed page */ 854 return 1; 855 } 856 857 /* Now set attributes. */ 858 attr = pa_to_attr(pa); 859 #ifdef DIAGNOSTIC 860 if (!attr) 861 panic("managed but no attr"); 862 #endif 863 if (flags & VM_PROT_ALL) 864 *attr |= PMAP_ATTR_REF; 865 if (flags & VM_PROT_WRITE) 866 *attr |= PMAP_ATTR_CHG; 867 } 868 869 s = splvm(); 870 871 /* Insert page into page table. */ 872 pte_enter(pm, va, tte); 873 874 /* If this is a real fault, enter it in the tlb */ 875 if (tte && ((flags & PMAP_WIRED) == 0)) { 876 ppc4xx_tlb_enter(pm->pm_ctx, va, tte); 877 } 878 splx(s); 879 880 /* Flush the real memory from the instruction cache. */ 881 if ((prot & VM_PROT_EXECUTE) && (tte & TTE_I) == 0) 882 __syncicache((void *)pa, PAGE_SIZE); 883 884 return 0; 885 } 886 887 void 888 pmap_unwire(struct pmap *pm, vaddr_t va) 889 { 890 struct pv_entry *pv; 891 paddr_t pa; 892 int s; 893 894 if (!pmap_extract(pm, va, &pa)) { 895 return; 896 } 897 898 pv = pa_to_pv(pa); 899 if (!pv) 900 return; 901 902 s = splvm(); 903 while (pv != NULL) { 904 if (pm == pv->pv_pm && PV_CMPVA(va, pv)) { 905 if (PV_ISWIRED(pv)) { 906 PV_UNWIRE(pv); 907 pm->pm_stats.wired_count--; 908 } 909 break; 910 } 911 pv = pv->pv_next; 912 } 913 splx(s); 914 } 915 916 void 917 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 918 { 919 int s; 920 u_int tte; 921 struct pmap *pm = pmap_kernel(); 922 923 /* 924 * Have to remove any existing mapping first. 925 */ 926 927 /* 928 * Generate TTE. 929 * 930 * XXXX 931 * 932 * Since the kernel does not handle execution privileges properly, 933 * we will handle read and execute permissions together. 934 */ 935 tte = 0; 936 if (prot & VM_PROT_ALL) { 937 938 tte = TTE_PA(pa) | TTE_EX | TTE_ZONE(ZONE_PRIV); 939 /* XXXX -- need to support multiple page sizes. */ 940 tte |= TTE_SZ_16K; 941 #ifdef DIAGNOSTIC 942 if ((prot & (PME_NOCACHE | PME_WRITETHROUG)) == 943 (PME_NOCACHE | PME_WRITETHROUG)) 944 panic("pmap_kenter_pa: uncached & writethrough"); 945 #endif 946 if (prot & PME_NOCACHE) 947 /* Must be I/O mapping */ 948 tte |= TTE_I | TTE_G; 949 #ifdef PPC_4XX_NOCACHE 950 tte |= TTE_I; 951 #else 952 else if (prot & PME_WRITETHROUG) 953 /* Uncached and writethrough are not compatible */ 954 tte |= TTE_W; 955 #endif 956 if (prot & VM_PROT_WRITE) 957 tte |= TTE_WR; 958 } 959 960 s = splvm(); 961 962 /* Insert page into page table. */ 963 pte_enter(pm, va, tte); 964 splx(s); 965 } 966 967 void 968 pmap_kremove(vaddr_t va, vsize_t len) 969 { 970 971 while (len > 0) { 972 pte_enter(pmap_kernel(), va, 0); 973 va += PAGE_SIZE; 974 len -= PAGE_SIZE; 975 } 976 } 977 978 /* 979 * Remove the given range of mapping entries. 980 */ 981 void 982 pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva) 983 { 984 int s; 985 paddr_t pa; 986 volatile u_int *ptp; 987 988 s = splvm(); 989 while (va < endva) { 990 991 if ((ptp = pte_find(pm, va)) && (pa = *ptp)) { 992 pa = TTE_PA(pa); 993 pmap_remove_pv(pm, va, pa); 994 *ptp = 0; 995 ppc4xx_tlb_flush(va, pm->pm_ctx); 996 pm->pm_stats.resident_count--; 997 } 998 va += PAGE_SIZE; 999 } 1000 1001 splx(s); 1002 } 1003 1004 /* 1005 * Get the physical page address for the given pmap/virtual address. 1006 */ 1007 bool 1008 pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap) 1009 { 1010 int seg = STIDX(va); 1011 int ptn = PTIDX(va); 1012 u_int pa = 0; 1013 int s; 1014 1015 s = splvm(); 1016 if (pm->pm_ptbl[seg] && (pa = pm->pm_ptbl[seg][ptn])) { 1017 *pap = TTE_PA(pa) | (va & PGOFSET); 1018 } 1019 splx(s); 1020 return (pa != 0); 1021 } 1022 1023 /* 1024 * Lower the protection on the specified range of this pmap. 1025 * 1026 * There are only two cases: either the protection is going to 0, 1027 * or it is going to read-only. 1028 */ 1029 void 1030 pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot) 1031 { 1032 volatile u_int *ptp; 1033 int s, bic; 1034 1035 if ((prot & VM_PROT_READ) == 0) { 1036 pmap_remove(pm, sva, eva); 1037 return; 1038 } 1039 bic = 0; 1040 if ((prot & VM_PROT_WRITE) == 0) { 1041 bic |= TTE_WR; 1042 } 1043 if ((prot & VM_PROT_EXECUTE) == 0) { 1044 bic |= TTE_EX; 1045 } 1046 if (bic == 0) { 1047 return; 1048 } 1049 s = splvm(); 1050 while (sva < eva) { 1051 if ((ptp = pte_find(pm, sva)) != NULL) { 1052 *ptp &= ~bic; 1053 ppc4xx_tlb_flush(sva, pm->pm_ctx); 1054 } 1055 sva += PAGE_SIZE; 1056 } 1057 splx(s); 1058 } 1059 1060 bool 1061 pmap_check_attr(struct vm_page *pg, u_int mask, int clear) 1062 { 1063 paddr_t pa; 1064 char *attr; 1065 int s, rv; 1066 1067 /* 1068 * First modify bits in cache. 1069 */ 1070 pa = VM_PAGE_TO_PHYS(pg); 1071 attr = pa_to_attr(pa); 1072 if (attr == NULL) 1073 return false; 1074 1075 s = splvm(); 1076 rv = ((*attr & mask) != 0); 1077 if (clear) { 1078 *attr &= ~mask; 1079 pmap_page_protect(pg, mask == PMAP_ATTR_CHG ? VM_PROT_READ : 0); 1080 } 1081 splx(s); 1082 return rv; 1083 } 1084 1085 1086 /* 1087 * Lower the protection on the specified physical page. 1088 * 1089 * There are only two cases: either the protection is going to 0, 1090 * or it is going to read-only. 1091 */ 1092 void 1093 pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 1094 { 1095 paddr_t pa = VM_PAGE_TO_PHYS(pg); 1096 vaddr_t va; 1097 struct pv_entry *pvh, *pv, *npv; 1098 struct pmap *pm; 1099 1100 pvh = pa_to_pv(pa); 1101 if (pvh == NULL) 1102 return; 1103 1104 /* Handle extra pvs which may be deleted in the operation */ 1105 for (pv = pvh->pv_next; pv; pv = npv) { 1106 npv = pv->pv_next; 1107 1108 pm = pv->pv_pm; 1109 va = pv->pv_va; 1110 pmap_protect(pm, va, va + PAGE_SIZE, prot); 1111 } 1112 /* Now check the head pv */ 1113 if (pvh->pv_pm) { 1114 pv = pvh; 1115 pm = pv->pv_pm; 1116 va = pv->pv_va; 1117 pmap_protect(pm, va, va + PAGE_SIZE, prot); 1118 } 1119 } 1120 1121 /* 1122 * Activate the address space for the specified process. If the process 1123 * is the current process, load the new MMU context. 1124 */ 1125 void 1126 pmap_activate(struct lwp *l) 1127 { 1128 #if 0 1129 struct pcb *pcb = lwp_getpcb(l); 1130 pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap; 1131 1132 /* 1133 * XXX Normally performed in cpu_lwp_fork(). 1134 */ 1135 printf("pmap_activate(%p), pmap=%p\n",l,pmap); 1136 pcb->pcb_pm = pmap; 1137 #endif 1138 } 1139 1140 /* 1141 * Deactivate the specified process's address space. 1142 */ 1143 void 1144 pmap_deactivate(struct lwp *l) 1145 { 1146 } 1147 1148 /* 1149 * Synchronize caches corresponding to [addr, addr+len) in p. 1150 */ 1151 void 1152 pmap_procwr(struct proc *p, vaddr_t va, size_t len) 1153 { 1154 struct pmap *pm = p->p_vmspace->vm_map.pmap; 1155 int msr, ctx, opid, step; 1156 1157 step = CACHELINESIZE; 1158 1159 /* 1160 * Need to turn off IMMU and switch to user context. 1161 * (icbi uses DMMU). 1162 */ 1163 if (!(ctx = pm->pm_ctx)) { 1164 /* No context -- assign it one */ 1165 ctx_alloc(pm); 1166 ctx = pm->pm_ctx; 1167 } 1168 __asm volatile("mfmsr %0;" 1169 "li %1, %7;" 1170 "andc %1,%0,%1;" 1171 "mtmsr %1;" 1172 "sync;isync;" 1173 "mfpid %1;" 1174 "mtpid %2;" 1175 "sync; isync;" 1176 "1:" 1177 "dcbf 0,%3;" 1178 "icbi 0,%3;" 1179 "add %3,%3,%5;" 1180 "addc. %4,%4,%6;" 1181 "bge 1b;" 1182 "mtpid %1;" 1183 "mtmsr %0;" 1184 "sync; isync" 1185 : "=&r" (msr), "=&r" (opid) 1186 : "r" (ctx), "r" (va), "r" (len), "r" (step), "r" (-step), 1187 "K" (PSL_IR | PSL_DR)); 1188 } 1189 1190 1191 /* This has to be done in real mode !!! */ 1192 void 1193 ppc4xx_tlb_flush(vaddr_t va, int pid) 1194 { 1195 u_long i, found; 1196 u_long msr; 1197 1198 /* If there's no context then it can't be mapped. */ 1199 if (!pid) 1200 return; 1201 1202 __asm( "mfpid %1;" /* Save PID */ 1203 "mfmsr %2;" /* Save MSR */ 1204 "li %0,0;" /* Now clear MSR */ 1205 "mtmsr %0;" 1206 "mtpid %4;" /* Set PID */ 1207 "sync;" 1208 "tlbsx. %0,0,%3;" /* Search TLB */ 1209 "sync;" 1210 "mtpid %1;" /* Restore PID */ 1211 "mtmsr %2;" /* Restore MSR */ 1212 "sync;isync;" 1213 "li %1,1;" 1214 "beq 1f;" 1215 "li %1,0;" 1216 "1:" 1217 : "=&r" (i), "=&r" (found), "=&r" (msr) 1218 : "r" (va), "r" (pid)); 1219 if (found && !TLB_LOCKED(i)) { 1220 1221 /* Now flush translation */ 1222 __asm volatile( 1223 "tlbwe %0,%1,0;" 1224 "sync;isync;" 1225 : : "r" (0), "r" (i)); 1226 1227 tlb_info[i].ti_ctx = 0; 1228 tlb_info[i].ti_flags = 0; 1229 tlbnext = i; 1230 /* Successful flushes */ 1231 tlbflush_ev.ev_count++; 1232 } 1233 } 1234 1235 void 1236 ppc4xx_tlb_flush_all(void) 1237 { 1238 u_long i; 1239 1240 for (i = 0; i < NTLB; i++) 1241 if (!TLB_LOCKED(i)) { 1242 __asm volatile( 1243 "tlbwe %0,%1,0;" 1244 "sync;isync;" 1245 : : "r" (0), "r" (i)); 1246 tlb_info[i].ti_ctx = 0; 1247 tlb_info[i].ti_flags = 0; 1248 } 1249 1250 __asm volatile("sync;isync"); 1251 } 1252 1253 /* Find a TLB entry to evict. */ 1254 static int 1255 ppc4xx_tlb_find_victim(void) 1256 { 1257 int flags; 1258 1259 for (;;) { 1260 if (++tlbnext >= NTLB) 1261 tlbnext = tlb_nreserved; 1262 flags = tlb_info[tlbnext].ti_flags; 1263 if (!(flags & TLBF_USED) || 1264 (flags & (TLBF_LOCKED | TLBF_REF)) == 0) { 1265 u_long va, stack = (u_long)&va; 1266 1267 if (!((tlb_info[tlbnext].ti_va ^ stack) & (~PGOFSET)) && 1268 (tlb_info[tlbnext].ti_ctx == KERNEL_PID) && 1269 (flags & TLBF_USED)) { 1270 /* Kernel stack page */ 1271 flags |= TLBF_USED; 1272 tlb_info[tlbnext].ti_flags = flags; 1273 } else { 1274 /* Found it! */ 1275 return (tlbnext); 1276 } 1277 } else { 1278 tlb_info[tlbnext].ti_flags = (flags & ~TLBF_REF); 1279 } 1280 } 1281 } 1282 1283 void 1284 ppc4xx_tlb_enter(int ctx, vaddr_t va, u_int pte) 1285 { 1286 u_long th, tl, idx; 1287 tlbpid_t pid; 1288 u_short msr; 1289 paddr_t pa; 1290 int s, sz; 1291 1292 tlbenter_ev.ev_count++; 1293 1294 sz = (pte & TTE_SZ_MASK) >> TTE_SZ_SHIFT; 1295 pa = (pte & TTE_RPN_MASK(sz)); 1296 th = (va & TLB_EPN_MASK) | (sz << TLB_SIZE_SHFT) | TLB_VALID; 1297 tl = (pte & ~TLB_RPN_MASK) | pa; 1298 tl |= ppc4xx_tlbflags(va, pa); 1299 1300 s = splhigh(); 1301 idx = ppc4xx_tlb_find_victim(); 1302 1303 #ifdef DIAGNOSTIC 1304 if ((idx < tlb_nreserved) || (idx >= NTLB)) { 1305 panic("ppc4xx_tlb_enter: replacing entry %ld", idx); 1306 } 1307 #endif 1308 1309 tlb_info[idx].ti_va = (va & TLB_EPN_MASK); 1310 tlb_info[idx].ti_ctx = ctx; 1311 tlb_info[idx].ti_flags = TLBF_USED | TLBF_REF; 1312 1313 __asm volatile( 1314 "mfmsr %0;" /* Save MSR */ 1315 "li %1,0;" 1316 "tlbwe %1,%3,0;" /* Invalidate old entry. */ 1317 "mtmsr %1;" /* Clear MSR */ 1318 "mfpid %1;" /* Save old PID */ 1319 "mtpid %2;" /* Load translation ctx */ 1320 "sync; isync;" 1321 #ifdef DEBUG 1322 "andi. %3,%3,63;" 1323 "tweqi %3,0;" /* XXXXX DEBUG trap on index 0 */ 1324 #endif 1325 "tlbwe %4,%3,1; tlbwe %5,%3,0;" /* Set TLB */ 1326 "sync; isync;" 1327 "mtpid %1; mtmsr %0;" /* Restore PID and MSR */ 1328 "sync; isync;" 1329 : "=&r" (msr), "=&r" (pid) 1330 : "r" (ctx), "r" (idx), "r" (tl), "r" (th)); 1331 splx(s); 1332 } 1333 1334 void 1335 ppc4xx_tlb_init(void) 1336 { 1337 int i; 1338 1339 /* Mark reserved TLB entries */ 1340 for (i = 0; i < tlb_nreserved; i++) { 1341 tlb_info[i].ti_flags = TLBF_LOCKED | TLBF_USED; 1342 tlb_info[i].ti_ctx = KERNEL_PID; 1343 } 1344 1345 /* Setup security zones */ 1346 /* Z0 - accessible by kernel only if TLB entry permissions allow 1347 * Z1,Z2 - access is controlled by TLB entry permissions 1348 * Z3 - full access regardless of TLB entry permissions 1349 */ 1350 1351 __asm volatile( 1352 "mtspr %0,%1;" 1353 "sync;" 1354 :: "K"(SPR_ZPR), "r" (0x1b000000)); 1355 } 1356 1357 /* 1358 * ppc4xx_tlb_size_mask: 1359 * 1360 * Roundup size to supported page size, return TLBHI mask and real size. 1361 */ 1362 static int 1363 ppc4xx_tlb_size_mask(size_t size, int *mask, int *rsiz) 1364 { 1365 int i; 1366 1367 for (i = 0; i < __arraycount(tlbsize); i++) 1368 if (size <= tlbsize[i]) { 1369 *mask = (i << TLB_SIZE_SHFT); 1370 *rsiz = tlbsize[i]; 1371 return (0); 1372 } 1373 return (EINVAL); 1374 } 1375 1376 /* 1377 * ppc4xx_tlb_mapiodev: 1378 * 1379 * Lookup virtual address of mapping previously entered via 1380 * ppc4xx_tlb_reserve. Search TLB directly so that we don't 1381 * need to waste extra storage for reserved mappings. Note 1382 * that reading TLBHI also sets PID, but all reserved mappings 1383 * use KERNEL_PID, so the side effect is nil. 1384 */ 1385 void * 1386 ppc4xx_tlb_mapiodev(paddr_t base, psize_t len) 1387 { 1388 paddr_t pa; 1389 vaddr_t va; 1390 u_int lo, hi, sz; 1391 int i; 1392 1393 /* tlb_nreserved is only allowed to grow, so this is safe. */ 1394 for (i = 0; i < tlb_nreserved; i++) { 1395 __asm volatile ( 1396 " tlbre %0,%2,1 \n" /* TLBLO */ 1397 " tlbre %1,%2,0 \n" /* TLBHI */ 1398 : "=&r" (lo), "=&r" (hi) 1399 : "r" (i)); 1400 1401 KASSERT(hi & TLB_VALID); 1402 KASSERT(mfspr(SPR_PID) == KERNEL_PID); 1403 1404 pa = (lo & TLB_RPN_MASK); 1405 if (base < pa) 1406 continue; 1407 1408 sz = tlbsize[(hi & TLB_SIZE_MASK) >> TLB_SIZE_SHFT]; 1409 if ((base + len) > (pa + sz)) 1410 continue; 1411 1412 va = (hi & TLB_EPN_MASK) + (base & (sz - 1)); /* sz = 2^n */ 1413 return (void *)(va); 1414 } 1415 1416 return (NULL); 1417 } 1418 1419 /* 1420 * ppc4xx_tlb_reserve: 1421 * 1422 * Map physical range to kernel virtual chunk via reserved TLB entry. 1423 */ 1424 void 1425 ppc4xx_tlb_reserve(paddr_t pa, vaddr_t va, size_t size, int flags) 1426 { 1427 u_int lo, hi; 1428 int szmask, rsize; 1429 1430 /* Called before pmap_bootstrap(), va outside kernel space. */ 1431 KASSERT(va < VM_MIN_KERNEL_ADDRESS || va >= VM_MAX_KERNEL_ADDRESS); 1432 KASSERT(! pmap_bootstrap_done); 1433 KASSERT(tlb_nreserved < NTLB); 1434 1435 /* Resolve size. */ 1436 if (ppc4xx_tlb_size_mask(size, &szmask, &rsize) != 0) 1437 panic("ppc4xx_tlb_reserve: entry %d, %zuB too large", 1438 size, tlb_nreserved); 1439 1440 /* Real size will be power of two >= 1024, so this is OK. */ 1441 pa &= ~(rsize - 1); /* RPN */ 1442 va &= ~(rsize - 1); /* EPN */ 1443 1444 lo = pa | TLB_WR | flags; 1445 hi = va | TLB_VALID | szmask; 1446 1447 #ifdef PPC_4XX_NOCACHE 1448 lo |= TLB_I; 1449 #endif 1450 1451 __asm volatile( 1452 " tlbwe %1,%0,1 \n" /* write TLBLO */ 1453 " tlbwe %2,%0,0 \n" /* write TLBHI */ 1454 " sync \n" 1455 " isync \n" 1456 : : "r" (tlb_nreserved), "r" (lo), "r" (hi)); 1457 1458 tlb_nreserved++; 1459 } 1460 1461 /* 1462 * We should pass the ctx in from trap code. 1463 */ 1464 int 1465 pmap_tlbmiss(vaddr_t va, int ctx) 1466 { 1467 volatile u_int *pte; 1468 u_long tte; 1469 1470 tlbmiss_ev.ev_count++; 1471 1472 /* 1473 * We will reserve 0 upto VM_MIN_KERNEL_ADDRESS for va == pa mappings. 1474 * Physical RAM is expected to live in this range, care must be taken 1475 * to not clobber 0 upto ${physmem} with device mappings in machdep 1476 * code. 1477 */ 1478 if (ctx != KERNEL_PID || 1479 (va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS)) { 1480 pte = pte_find((struct pmap *)__UNVOLATILE(ctxbusy[ctx]), va); 1481 if (pte == NULL) { 1482 /* Map unmanaged addresses directly for kernel access */ 1483 return 1; 1484 } 1485 tte = *pte; 1486 if (tte == 0) { 1487 return 1; 1488 } 1489 } else { 1490 /* Create a 16MB writable mapping. */ 1491 #ifdef PPC_4XX_NOCACHE 1492 tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_I |TTE_WR; 1493 #else 1494 tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_WR; 1495 #endif 1496 } 1497 tlbhit_ev.ev_count++; 1498 ppc4xx_tlb_enter(ctx, va, tte); 1499 1500 return 0; 1501 } 1502 1503 /* 1504 * Flush all the entries matching a context from the TLB. 1505 */ 1506 static int 1507 ctx_flush(int cnum) 1508 { 1509 int i; 1510 1511 /* We gotta steal this context */ 1512 for (i = tlb_nreserved; i < NTLB; i++) { 1513 if (tlb_info[i].ti_ctx == cnum) { 1514 /* Can't steal ctx if it has a locked entry. */ 1515 if (TLB_LOCKED(i)) { 1516 #ifdef DIAGNOSTIC 1517 printf("ctx_flush: can't invalidate " 1518 "locked mapping %d " 1519 "for context %d\n", i, cnum); 1520 #ifdef DDB 1521 Debugger(); 1522 #endif 1523 #endif 1524 return (1); 1525 } 1526 #ifdef DIAGNOSTIC 1527 if (i < tlb_nreserved) 1528 panic("TLB entry %d not locked", i); 1529 #endif 1530 /* Invalidate particular TLB entry regardless of locked status */ 1531 __asm volatile("tlbwe %0,%1,0" : :"r"(0),"r"(i)); 1532 tlb_info[i].ti_flags = 0; 1533 } 1534 } 1535 return (0); 1536 } 1537 1538 /* 1539 * Allocate a context. If necessary, steal one from someone else. 1540 * 1541 * The new context is flushed from the TLB before returning. 1542 */ 1543 int 1544 ctx_alloc(struct pmap *pm) 1545 { 1546 int s, cnum; 1547 static int next = MINCTX; 1548 1549 if (pm == pmap_kernel()) { 1550 #ifdef DIAGNOSTIC 1551 printf("ctx_alloc: kernel pmap!\n"); 1552 #endif 1553 return (0); 1554 } 1555 s = splvm(); 1556 1557 /* Find a likely context. */ 1558 cnum = next; 1559 do { 1560 if ((++cnum) > NUMCTX) 1561 cnum = MINCTX; 1562 } while (ctxbusy[cnum] != NULL && cnum != next); 1563 1564 /* Now clean it out */ 1565 oops: 1566 if (cnum < MINCTX) 1567 cnum = MINCTX; /* Never steal ctx 0 or 1 */ 1568 if (ctx_flush(cnum)) { 1569 /* oops -- something's wired. */ 1570 if ((++cnum) > NUMCTX) 1571 cnum = MINCTX; 1572 goto oops; 1573 } 1574 1575 if (ctxbusy[cnum]) { 1576 #ifdef DEBUG 1577 /* We should identify this pmap and clear it */ 1578 printf("Warning: stealing context %d\n", cnum); 1579 #endif 1580 ctxbusy[cnum]->pm_ctx = 0; 1581 } 1582 ctxbusy[cnum] = pm; 1583 next = cnum; 1584 splx(s); 1585 pm->pm_ctx = cnum; 1586 1587 return cnum; 1588 } 1589 1590 /* 1591 * Give away a context. 1592 */ 1593 void 1594 ctx_free(struct pmap *pm) 1595 { 1596 int oldctx; 1597 1598 oldctx = pm->pm_ctx; 1599 1600 if (oldctx == 0) 1601 panic("ctx_free: freeing kernel context"); 1602 #ifdef DIAGNOSTIC 1603 if (ctxbusy[oldctx] == 0) 1604 printf("ctx_free: freeing free context %d\n", oldctx); 1605 if (ctxbusy[oldctx] != pm) { 1606 printf("ctx_free: freeing someone esle's context\n " 1607 "ctxbusy[%d] = %p, pm->pm_ctx = %p\n", 1608 oldctx, (void *)(u_long)ctxbusy[oldctx], pm); 1609 #ifdef DDB 1610 Debugger(); 1611 #endif 1612 } 1613 #endif 1614 /* We should verify it has not been stolen and reallocated... */ 1615 ctxbusy[oldctx] = NULL; 1616 ctx_flush(oldctx); 1617 } 1618 1619 1620 #ifdef DEBUG 1621 /* 1622 * Test ref/modify handling. 1623 */ 1624 void pmap_testout(void); 1625 void 1626 pmap_testout(void) 1627 { 1628 vaddr_t va; 1629 volatile int *loc; 1630 int val = 0; 1631 paddr_t pa; 1632 struct vm_page *pg; 1633 int ref, mod; 1634 1635 /* Allocate a page */ 1636 va = (vaddr_t)uvm_km_alloc(kernel_map, PAGE_SIZE, 0, 1637 UVM_KMF_WIRED | UVM_KMF_ZERO); 1638 loc = (int*)va; 1639 1640 pmap_extract(pmap_kernel(), va, &pa); 1641 pg = PHYS_TO_VM_PAGE(pa); 1642 pmap_unwire(pmap_kernel(), va); 1643 1644 pmap_kremove(va, PAGE_SIZE); 1645 pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0); 1646 pmap_update(pmap_kernel()); 1647 1648 /* Now clear reference and modify */ 1649 ref = pmap_clear_reference(pg); 1650 mod = pmap_clear_modify(pg); 1651 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1652 (void *)(u_long)va, (long)pa, 1653 ref, mod); 1654 1655 /* Check it's properly cleared */ 1656 ref = pmap_is_referenced(pg); 1657 mod = pmap_is_modified(pg); 1658 printf("Checking cleared page: ref %d, mod %d\n", 1659 ref, mod); 1660 1661 /* Reference page */ 1662 val = *loc; 1663 1664 ref = pmap_is_referenced(pg); 1665 mod = pmap_is_modified(pg); 1666 printf("Referenced page: ref %d, mod %d val %x\n", 1667 ref, mod, val); 1668 1669 /* Now clear reference and modify */ 1670 ref = pmap_clear_reference(pg); 1671 mod = pmap_clear_modify(pg); 1672 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1673 (void *)(u_long)va, (long)pa, 1674 ref, mod); 1675 1676 /* Modify page */ 1677 *loc = 1; 1678 1679 ref = pmap_is_referenced(pg); 1680 mod = pmap_is_modified(pg); 1681 printf("Modified page: ref %d, mod %d\n", 1682 ref, mod); 1683 1684 /* Now clear reference and modify */ 1685 ref = pmap_clear_reference(pg); 1686 mod = pmap_clear_modify(pg); 1687 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1688 (void *)(u_long)va, (long)pa, 1689 ref, mod); 1690 1691 /* Check it's properly cleared */ 1692 ref = pmap_is_referenced(pg); 1693 mod = pmap_is_modified(pg); 1694 printf("Checking cleared page: ref %d, mod %d\n", 1695 ref, mod); 1696 1697 /* Modify page */ 1698 *loc = 1; 1699 1700 ref = pmap_is_referenced(pg); 1701 mod = pmap_is_modified(pg); 1702 printf("Modified page: ref %d, mod %d\n", 1703 ref, mod); 1704 1705 /* Check pmap_protect() */ 1706 pmap_protect(pmap_kernel(), va, va+1, VM_PROT_READ); 1707 pmap_update(pmap_kernel()); 1708 ref = pmap_is_referenced(pg); 1709 mod = pmap_is_modified(pg); 1710 printf("pmap_protect(VM_PROT_READ): ref %d, mod %d\n", 1711 ref, mod); 1712 1713 /* Now clear reference and modify */ 1714 ref = pmap_clear_reference(pg); 1715 mod = pmap_clear_modify(pg); 1716 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1717 (void *)(u_long)va, (long)pa, 1718 ref, mod); 1719 1720 /* Reference page */ 1721 val = *loc; 1722 1723 ref = pmap_is_referenced(pg); 1724 mod = pmap_is_modified(pg); 1725 printf("Referenced page: ref %d, mod %d val %x\n", 1726 ref, mod, val); 1727 1728 /* Now clear reference and modify */ 1729 ref = pmap_clear_reference(pg); 1730 mod = pmap_clear_modify(pg); 1731 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1732 (void *)(u_long)va, (long)pa, 1733 ref, mod); 1734 1735 /* Modify page */ 1736 #if 0 1737 pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0); 1738 pmap_update(pmap_kernel()); 1739 #endif 1740 *loc = 1; 1741 1742 ref = pmap_is_referenced(pg); 1743 mod = pmap_is_modified(pg); 1744 printf("Modified page: ref %d, mod %d\n", 1745 ref, mod); 1746 1747 /* Check pmap_protect() */ 1748 pmap_protect(pmap_kernel(), va, va+1, VM_PROT_NONE); 1749 pmap_update(pmap_kernel()); 1750 ref = pmap_is_referenced(pg); 1751 mod = pmap_is_modified(pg); 1752 printf("pmap_protect(): ref %d, mod %d\n", 1753 ref, mod); 1754 1755 /* Now clear reference and modify */ 1756 ref = pmap_clear_reference(pg); 1757 mod = pmap_clear_modify(pg); 1758 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1759 (void *)(u_long)va, (long)pa, 1760 ref, mod); 1761 1762 /* Reference page */ 1763 val = *loc; 1764 1765 ref = pmap_is_referenced(pg); 1766 mod = pmap_is_modified(pg); 1767 printf("Referenced page: ref %d, mod %d val %x\n", 1768 ref, mod, val); 1769 1770 /* Now clear reference and modify */ 1771 ref = pmap_clear_reference(pg); 1772 mod = pmap_clear_modify(pg); 1773 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1774 (void *)(u_long)va, (long)pa, 1775 ref, mod); 1776 1777 /* Modify page */ 1778 #if 0 1779 pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0); 1780 pmap_update(pmap_kernel()); 1781 #endif 1782 *loc = 1; 1783 1784 ref = pmap_is_referenced(pg); 1785 mod = pmap_is_modified(pg); 1786 printf("Modified page: ref %d, mod %d\n", 1787 ref, mod); 1788 1789 /* Check pmap_pag_protect() */ 1790 pmap_page_protect(pg, VM_PROT_READ); 1791 ref = pmap_is_referenced(pg); 1792 mod = pmap_is_modified(pg); 1793 printf("pmap_page_protect(VM_PROT_READ): ref %d, mod %d\n", 1794 ref, mod); 1795 1796 /* Now clear reference and modify */ 1797 ref = pmap_clear_reference(pg); 1798 mod = pmap_clear_modify(pg); 1799 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1800 (void *)(u_long)va, (long)pa, 1801 ref, mod); 1802 1803 /* Reference page */ 1804 val = *loc; 1805 1806 ref = pmap_is_referenced(pg); 1807 mod = pmap_is_modified(pg); 1808 printf("Referenced page: ref %d, mod %d val %x\n", 1809 ref, mod, val); 1810 1811 /* Now clear reference and modify */ 1812 ref = pmap_clear_reference(pg); 1813 mod = pmap_clear_modify(pg); 1814 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1815 (void *)(u_long)va, (long)pa, 1816 ref, mod); 1817 1818 /* Modify page */ 1819 #if 0 1820 pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0); 1821 pmap_update(pmap_kernel()); 1822 #endif 1823 *loc = 1; 1824 1825 ref = pmap_is_referenced(pg); 1826 mod = pmap_is_modified(pg); 1827 printf("Modified page: ref %d, mod %d\n", 1828 ref, mod); 1829 1830 /* Check pmap_pag_protect() */ 1831 pmap_page_protect(pg, VM_PROT_NONE); 1832 ref = pmap_is_referenced(pg); 1833 mod = pmap_is_modified(pg); 1834 printf("pmap_page_protect(): ref %d, mod %d\n", 1835 ref, mod); 1836 1837 /* Now clear reference and modify */ 1838 ref = pmap_clear_reference(pg); 1839 mod = pmap_clear_modify(pg); 1840 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1841 (void *)(u_long)va, (long)pa, 1842 ref, mod); 1843 1844 1845 /* Reference page */ 1846 val = *loc; 1847 1848 ref = pmap_is_referenced(pg); 1849 mod = pmap_is_modified(pg); 1850 printf("Referenced page: ref %d, mod %d val %x\n", 1851 ref, mod, val); 1852 1853 /* Now clear reference and modify */ 1854 ref = pmap_clear_reference(pg); 1855 mod = pmap_clear_modify(pg); 1856 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1857 (void *)(u_long)va, (long)pa, 1858 ref, mod); 1859 1860 /* Modify page */ 1861 #if 0 1862 pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0); 1863 pmap_update(pmap_kernel()); 1864 #endif 1865 *loc = 1; 1866 1867 ref = pmap_is_referenced(pg); 1868 mod = pmap_is_modified(pg); 1869 printf("Modified page: ref %d, mod %d\n", 1870 ref, mod); 1871 1872 /* Unmap page */ 1873 pmap_remove(pmap_kernel(), va, va+1); 1874 pmap_update(pmap_kernel()); 1875 ref = pmap_is_referenced(pg); 1876 mod = pmap_is_modified(pg); 1877 printf("Unmapped page: ref %d, mod %d\n", ref, mod); 1878 1879 /* Now clear reference and modify */ 1880 ref = pmap_clear_reference(pg); 1881 mod = pmap_clear_modify(pg); 1882 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1883 (void *)(u_long)va, (long)pa, ref, mod); 1884 1885 /* Check it's properly cleared */ 1886 ref = pmap_is_referenced(pg); 1887 mod = pmap_is_modified(pg); 1888 printf("Checking cleared page: ref %d, mod %d\n", 1889 ref, mod); 1890 1891 pmap_remove(pmap_kernel(), va, va + PAGE_SIZE); 1892 pmap_kenter_pa(va, pa, VM_PROT_ALL, 0); 1893 uvm_km_free(kernel_map, (vaddr_t)va, PAGE_SIZE, UVM_KMF_WIRED); 1894 } 1895 #endif 1896