/*	$NetBSD: pmap.c,v 1.14 2002/11/24 17:05:45 thorpej Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#undef PPC_4XX_NOCACHE

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/device.h>

#include <uvm/uvm.h>

#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/powerpc.h>

#include <powerpc/spr.h>
#include <machine/tlb.h>

/*
 * kernmap is an array of PTEs large enough to map in
 * 4GB.  At 16KB/page it is 256K entries or 2MB.
 */
#define KERNMAP_SIZE	((0xffffffffU/NBPG)+1)
caddr_t kernmap;

#define MINCTX		2
#define NUMCTX		256
volatile struct pmap *ctxbusy[NUMCTX];

#define TLBF_USED	0x1
#define TLBF_REF	0x2
#define TLBF_LOCKED	0x4
#define TLB_LOCKED(i)	(tlb_info[(i)].ti_flags & TLBF_LOCKED)
typedef struct tlb_info_s {
	char	ti_flags;
	char	ti_ctx;		/* TLB_PID associated with the entry */
	u_int	ti_va;
} tlb_info_t;

volatile tlb_info_t tlb_info[NTLB];
/* We'll use a modified FIFO replacement policy because it's cheap */
volatile int tlbnext = TLB_NRESERVED;

u_long dtlb_miss_count = 0;
u_long itlb_miss_count = 0;
u_long ktlb_miss_count = 0;
u_long utlb_miss_count = 0;

/* Event counters */
struct evcnt tlbmiss_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
	NULL, "cpu", "tlbmiss");
struct evcnt tlbhit_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
	NULL, "cpu", "tlbhit");
struct evcnt tlbflush_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
	NULL, "cpu", "tlbflush");
struct evcnt tlbenter_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
	NULL, "cpu", "tlbenter");

struct pmap kernel_pmap_;

int physmem;
static int npgs;
static u_int nextavail;
#ifndef MSGBUFADDR
extern paddr_t msgbuf_paddr;
#endif

static struct mem_region *mem, *avail;

/*
 * This is a cache of referenced/modified bits.
 * Bits herein are shifted by ATTRSHFT.
 */
static char *pmap_attrib;

#define PV_WIRED	0x1
#define PV_WIRE(pv)	((pv)->pv_va |= PV_WIRED)
#define PV_CMPVA(va,pv)	(!(((pv)->pv_va^(va))&(~PV_WIRED)))

struct pv_entry {
	struct pv_entry *pv_next;	/* Linked list of mappings */
	vaddr_t pv_va;			/* virtual address of mapping */
	struct pmap *pv_pm;
};

struct pv_entry *pv_table;
static struct pool pv_pool;

static int pmap_initialized;

static int ctx_flush(int);

inline struct pv_entry *pa_to_pv(paddr_t);
static inline char *pa_to_attr(paddr_t);

static inline volatile u_int *pte_find(struct pmap *, vaddr_t);
static inline int pte_enter(struct pmap *, vaddr_t, u_int);

static void pmap_pinit(pmap_t);
static void pmap_release(pmap_t);
static inline int pmap_enter_pv(struct pmap *, vaddr_t, paddr_t);
static void pmap_remove_pv(struct pmap *, vaddr_t, paddr_t);


inline struct pv_entry *
pa_to_pv(paddr_t pa)
{
	int bank, pg;

	bank = vm_physseg_find(atop(pa), &pg);
	if (bank == -1)
		return NULL;
	return &vm_physmem[bank].pmseg.pvent[pg];
}

static inline char *
pa_to_attr(paddr_t pa)
{
	int bank, pg;

	bank = vm_physseg_find(atop(pa), &pg);
	if (bank == -1)
		return NULL;
	return &vm_physmem[bank].pmseg.attrs[pg];
}

/*
 * Insert PTE into page table.
 */
int
pte_enter(struct pmap *pm, vaddr_t va, u_int pte)
{
	int seg = STIDX(va);
	int ptn = PTIDX(va);
	paddr_t pa;

	if (!pm->pm_ptbl[seg]) {
		/* Don't allocate a page to clear a non-existent mapping. */
		if (!pte)
			return (1);
		/* Allocate a page XXXX this will sleep! */
		pa = 0;
		pm->pm_ptbl[seg] =
		    (uint *)uvm_km_alloc1(kernel_map, NBPG, 1);
	}
	pm->pm_ptbl[seg][ptn] = pte;

	/* Flush entry. */
	ppc4xx_tlb_flush(va, pm->pm_ctx);
	return (1);
}

/*
 * Get a pointer to a PTE in a page table.
 */
volatile u_int *
pte_find(struct pmap *pm, vaddr_t va)
{
	int seg = STIDX(va);
	int ptn = PTIDX(va);

	if (pm->pm_ptbl[seg])
		return (&pm->pm_ptbl[seg][ptn]);

	return (NULL);
}

/*
 * This is called during initppc, before the system is really initialized.
 */
void
pmap_bootstrap(u_int kernelstart, u_int kernelend)
{
	struct mem_region *mp, *mp1;
	int cnt, i;
	u_int s, e, sz;

	/*
	 * Allocate the kernel page table at the end of
	 * kernel space so it's in the locked TTE.
	 */
	kernmap = (caddr_t)kernelend;

	/*
	 * Initialize kernel page table.
	 */
	for (i = 0; i < STSZ; i++) {
		pmap_kernel()->pm_ptbl[i] = 0;
	}
	ctxbusy[0] = ctxbusy[1] = pmap_kernel();

	/*
	 * Announce page-size to the VM-system
	 */
	uvmexp.pagesize = NBPG;
	uvm_setpagesize();

	/*
	 * Get memory.
	 */
	mem_regions(&mem, &avail);
	for (mp = mem; mp->size; mp++) {
		physmem += btoc(mp->size);
		printf("+%lx,", mp->size);
	}
	printf("\n");
	ppc4xx_tlb_init();
	/*
	 * Count the number of available entries.
	 */
	for (cnt = 0, mp = avail; mp->size; mp++)
		cnt++;
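
	/*
	 * The loop below clips the kernel image out of each available
	 * region.  For example (illustrative addresses only): a kernel
	 * at [0x100000, 0x400000) inside an avail region [0, 0x2000000)
	 * leaves two regions, [0, 0x100000) and [0x400000, 0x2000000).
	 */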
	/*
	 * Page align all regions.
	 * Non-page aligned memory isn't very interesting to us.
	 * Also, sort the entries for ascending addresses.
	 */
	kernelstart &= ~PGOFSET;
	kernelend = (kernelend + PGOFSET) & ~PGOFSET;
	for (mp = avail; mp->size; mp++) {
		s = mp->start;
		e = mp->start + mp->size;
		printf("%08x-%08x -> ", s, e);
		/*
		 * Check whether this region holds all of the kernel.
		 */
		if (s < kernelstart && e > kernelend) {
			avail[cnt].start = kernelend;
			avail[cnt++].size = e - kernelend;
			e = kernelstart;
		}
		/*
		 * Look whether this region starts within the kernel.
		 */
		if (s >= kernelstart && s < kernelend) {
			if (e <= kernelend)
				goto empty;
			s = kernelend;
		}
		/*
		 * Now look whether this region ends within the kernel.
		 */
		if (e > kernelstart && e <= kernelend) {
			if (s >= kernelstart)
				goto empty;
			e = kernelstart;
		}
		/*
		 * Now page align the start and size of the region.
		 */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s)
			e = s;
		sz = e - s;
		printf("%08x-%08x = %x\n", s, e, sz);
		/*
		 * Check whether some memory is left here.
		 */
		if (sz == 0) {
 empty:
			memmove(mp, mp + 1,
			    (cnt - (mp - avail)) * sizeof *mp);
			cnt--;
			mp--;
			continue;
		}
		/*
		 * Do an insertion sort.
		 */
		npgs += btoc(sz);
		for (mp1 = avail; mp1 < mp; mp1++)
			if (s < mp1->start)
				break;
		if (mp1 < mp) {
			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
			mp1->start = s;
			mp1->size = sz;
		} else {
			mp->start = s;
			mp->size = sz;
		}
	}

	/*
	 * We cannot do pmap_steal_memory here,
	 * since we don't run with translation enabled yet.
	 */
#ifndef MSGBUFADDR
	/*
	 * allow for msgbuf
	 */
	sz = round_page(MSGBUFSIZE);
	mp = NULL;
	for (mp1 = avail; mp1->size; mp1++)
		if (mp1->size >= sz)
			mp = mp1;
	if (mp == NULL)
		panic("not enough memory?");

	npgs -= btoc(sz);
	msgbuf_paddr = mp->start + mp->size - sz;
	mp->size -= sz;
	if (mp->size <= 0)
		memmove(mp, mp + 1, (cnt - (mp - avail)) * sizeof *mp);
#endif

	printf("Loading pages\n");
	for (mp = avail; mp->size; mp++)
		uvm_page_physload(atop(mp->start), atop(mp->start + mp->size),
		    atop(mp->start), atop(mp->start + mp->size),
		    VM_FREELIST_DEFAULT);

	/*
	 * Initialize kernel pmap and hardware.
	 */
	/* Set up the TLB PID allocator so it knows we are already using PID 1 */
	pmap_kernel()->pm_ctx = KERNEL_PID;
	nextavail = avail->start;


	evcnt_attach_static(&tlbhit_ev);
	evcnt_attach_static(&tlbmiss_ev);
	evcnt_attach_static(&tlbflush_ev);
	evcnt_attach_static(&tlbenter_ev);
	printf("Done\n");
}

/*
 * Restrict given range to physical memory
 *
 * (Used by /dev/mem)
 */
void
pmap_real_memory(paddr_t *start, psize_t *size)
{
	struct mem_region *mp;

	for (mp = mem; mp->size; mp++) {
		if (*start + *size > mp->start &&
		    *start < mp->start + mp->size) {
			if (*start < mp->start) {
				*size -= mp->start - *start;
				*start = mp->start;
			}
			if (*start + *size > mp->start + mp->size)
				*size = mp->start + mp->size - *start;
			return;
		}
	}
	*size = 0;
}
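
/*
 * pmap_init() below carves one zeroed allocation into two arrays:
 * npgs struct pv_entry headers (pv_table) followed by npgs attribute
 * bytes (pmap_attrib), then hands each physical segment its slice of
 * both arrays.
 */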
/*
 * Initialize anything else for pmap handling.
 * Called during vm_init().
 */
void
pmap_init(void)
{
	struct pv_entry *pv;
	vsize_t sz;
	vaddr_t addr;
	int i, s;
	int bank;
	char *attr;

	sz = (vsize_t)((sizeof(struct pv_entry) + 1) * npgs);
	sz = round_page(sz);
	addr = uvm_km_zalloc(kernel_map, sz);
	s = splvm();
	pv = pv_table = (struct pv_entry *)addr;
	for (i = npgs; --i >= 0;)
		pv++->pv_pm = NULL;
	pmap_attrib = (char *)pv;
	memset(pv, 0, npgs);

	pv = pv_table;
	attr = pmap_attrib;
	for (bank = 0; bank < vm_nphysseg; bank++) {
		sz = vm_physmem[bank].end - vm_physmem[bank].start;
		vm_physmem[bank].pmseg.pvent = pv;
		vm_physmem[bank].pmseg.attrs = attr;
		pv += sz;
		attr += sz;
	}

	pmap_initialized = 1;
	splx(s);

	/* Set up a pool for additional pvlist structures */
	pool_init(&pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pv_entry", NULL);
}

/*
 * How much virtual space is available to the kernel?
 */
void
pmap_virtual_space(vaddr_t *start, vaddr_t *end)
{

#if 0
	/*
	 * Reserve one segment for kernel virtual memory
	 */
	*start = (vaddr_t)(KERNEL_SR << ADDR_SR_SHFT);
	*end = *start + SEGMENT_LENGTH;
#else
	*start = (vaddr_t) VM_MIN_KERNEL_ADDRESS;
	*end = (vaddr_t) VM_MAX_KERNEL_ADDRESS;
#endif
}

#ifdef PMAP_GROWKERNEL
/*
 * Preallocate kernel page tables to a specified VA.
 * This simply loops through the first TTE for each
 * page table from the beginning of the kernel pmap,
 * reads the entry, and if the result is
 * zero (either invalid entry or no page table) it stores
 * a zero there, populating page tables in the process.
 * This is not the most efficient technique but I don't
 * expect it to be called that often.
 */
extern struct vm_page *vm_page_alloc1 __P((void));
extern void vm_page_free1 __P((struct vm_page *));

vaddr_t kbreak = VM_MIN_KERNEL_ADDRESS;

vaddr_t
pmap_growkernel(vaddr_t maxkvaddr)
{
	int s;
	int seg;
	paddr_t pg;
	struct pmap *pm = pmap_kernel();

	s = splvm();

	/* Align with the start of a page table */
	for (kbreak &= ~(PTMAP-1); kbreak < maxkvaddr;
	     kbreak += PTMAP) {
		seg = STIDX(kbreak);

		if (pte_find(pm, kbreak))
			continue;

		if (uvm.page_init_done) {
			pg = (paddr_t)VM_PAGE_TO_PHYS(vm_page_alloc1());
		} else {
			if (!uvm_page_physget(&pg))
				panic("pmap_growkernel: no memory");
		}
		if (!pg)
			panic("pmap_growkernel: no pages");
		pmap_zero_page((paddr_t)pg);

		/* XXX This is based on all physmem being addressable */
		pm->pm_ptbl[seg] = (u_int *)pg;
	}
	splx(s);
	return (kbreak);
}

/*
 * vm_page_alloc1:
 *
 *	Allocate and return a memory cell with no associated object.
 */
struct vm_page *
vm_page_alloc1(void)
{
	struct vm_page *pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);

	if (pg) {
		pg->wire_count = 1;	/* no mappings yet */
		pg->flags &= ~PG_BUSY;	/* never busy */
	}
	return pg;
}
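
/*
 * vm_page_alloc1() above and vm_page_free1() below manage pages with
 * no associated UVM object; pmap_growkernel() uses them for kernel
 * page-table pages once uvm.page_init_done is set.
 */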
/*
 * vm_page_free1:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void
vm_page_free1(struct vm_page *mem)
{

#ifdef DIAGNOSTIC
	if (mem->flags != (PG_CLEAN|PG_FAKE)) {
		printf("Freeing invalid page %p\n", mem);
		printf("pa = %llx\n",
		    (unsigned long long)VM_PAGE_TO_PHYS(mem));
#ifdef DDB
		Debugger();
#endif
		return;
	}
#endif
	mem->flags |= PG_BUSY;
	mem->wire_count = 0;
	uvm_pagefree(mem);
}
#endif

/*
 * Create and return a physical map.
 */
struct pmap *
pmap_create(void)
{
	struct pmap *pm;

	pm = (struct pmap *)malloc(sizeof *pm, M_VMPMAP, M_WAITOK);
	memset((caddr_t)pm, 0, sizeof *pm);
	pmap_pinit(pm);
	return pm;
}

/*
 * Initialize a preallocated and zeroed pmap structure.
 */
void
pmap_pinit(struct pmap *pm)
{
	int i;

	/*
	 * Allocate some segment registers for this pmap.
	 */
	pm->pm_refs = 1;
	for (i = 0; i < STSZ; i++)
		pm->pm_ptbl[i] = NULL;
}

/*
 * Add a reference to the given pmap.
 */
void
pmap_reference(struct pmap *pm)
{

	pm->pm_refs++;
}

/*
 * Retire the given pmap from service.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_destroy(struct pmap *pm)
{

	if (--pm->pm_refs == 0) {
		pmap_release(pm);
		free((caddr_t)pm, M_VMPMAP);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 */
static void
pmap_release(struct pmap *pm)
{
	int i;

	for (i = 0; i < STSZ; i++)
		if (pm->pm_ptbl[i]) {
			uvm_km_free(kernel_map, (vaddr_t)pm->pm_ptbl[i],
			    NBPG);
			pm->pm_ptbl[i] = NULL;
		}
	if (pm->pm_ctx)
		ctx_free(pm);
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vaddr_t dst_addr,
    vsize_t len, vaddr_t src_addr)
{
}

/*
 * Require that all active physical maps contain no
 * incorrect entries NOW.
 */
void
pmap_update(struct pmap *pmap)
{
}

/*
 * Garbage collects the physical map system for
 * pages which are no longer used.
 * Success need not be guaranteed -- that is, there
 * may well be pages which are not referenced, but
 * others may be collected.
 * Called by the pageout daemon when pages are scarce.
 */
void
pmap_collect(struct pmap *pm)
{
}

/*
 * Fill the given physical page with zeroes.
 */
void
pmap_zero_page(paddr_t pa)
{

#ifdef PPC_4XX_NOCACHE
	memset((caddr_t)pa, 0, NBPG);
#else
	int i;

	for (i = NBPG/CACHELINESIZE; i > 0; i--) {
		__asm __volatile ("dcbz 0,%0" :: "r"(pa));
		pa += CACHELINESIZE;
	}
#endif
}

/*
 * Copy the given physical source page to its destination.
 */
void
pmap_copy_page(paddr_t src, paddr_t dst)
{

	memcpy((caddr_t)dst, (caddr_t)src, NBPG);
	dcache_flush_page(dst);
}
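
/*
 * PV entries record every (pmap, va) mapping of each managed page so
 * that mappings can be found from the physical side.  The first
 * mapping of a page lives directly in its pv_table header; further
 * mappings are chained through entries taken from pv_pool.
 */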
/*
 * This returns whether this is the first mapping of a page.
 */
static inline int
pmap_enter_pv(struct pmap *pm, vaddr_t va, paddr_t pa)
{
	struct pv_entry *pv, *npv = NULL;
	int s;

	if (!pmap_initialized)
		return 0;

	s = splvm();

	pv = pa_to_pv(pa);
	for (npv = pv; npv; npv = npv->pv_next)
		if (npv->pv_va == va && npv->pv_pm == pm) {
			printf("Duplicate pv: va %lx pm %p\n", va, pm);
			Debugger();
			splx(s);
			return (1);
		}

	if (!pv->pv_pm) {
		/*
		 * No entries yet, use header as the first entry.
		 */
		pv->pv_va = va;
		pv->pv_pm = pm;
		pv->pv_next = NULL;
	} else {
		/*
		 * There is at least one other VA mapping this page.
		 * Place this entry after the header.
		 */
		npv = pool_get(&pv_pool, PR_WAITOK);
		if (!npv) {
			splx(s);
			return (0);
		}
		npv->pv_va = va;
		npv->pv_pm = pm;
		npv->pv_next = pv->pv_next;
		pv->pv_next = npv;
	}
	splx(s);
	return (1);
}

static void
pmap_remove_pv(struct pmap *pm, vaddr_t va, paddr_t pa)
{
	struct pv_entry *pv, *npv;

	/*
	 * Remove from the PV table.
	 */
	pv = pa_to_pv(pa);
	if (!pv)
		return;

	/*
	 * If it is the first entry on the list, it is actually
	 * in the header and we must copy the following entry up
	 * to the header.  Otherwise we must search the list for
	 * the entry.  In either case we free the now unused entry.
	 */
	if (pm == pv->pv_pm && PV_CMPVA(va, pv)) {
		if ((npv = pv->pv_next)) {
			*pv = *npv;
			pool_put(&pv_pool, npv);
		} else
			pv->pv_pm = NULL;
	} else {
		for (; (npv = pv->pv_next) != NULL; pv = npv)
			if (pm == npv->pv_pm && PV_CMPVA(va, npv))
				break;
		if (npv) {
			pv->pv_next = npv->pv_next;
			pool_put(&pv_pool, npv);
		}
	}
}

/*
 * Insert physical page at pa into the given pmap at virtual address va.
 */
int
pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
{
	int s;
	u_int tte;
	int managed;

	/*
	 * Have to remove any existing mapping first.
	 */
	pmap_remove(pm, va, va + NBPG);

	if (flags & PMAP_WIRED)
		flags |= prot;

	/* If it has no protections don't bother w/the rest */
	if (!(flags & VM_PROT_ALL))
		return (0);

	managed = 0;
	if (vm_physseg_find(atop(pa), NULL) != -1)
		managed = 1;

	/*
	 * Generate TTE.
	 *
	 * XXXX
	 *
	 * Since the kernel does not handle execution privileges properly,
	 * we will handle read and execute permissions together.
	 */
	tte = TTE_PA(pa) | TTE_EX;
	/* XXXX -- need to support multiple page sizes. */
	tte |= TTE_SZ_16K;
#ifdef DIAGNOSTIC
	if ((flags & (PME_NOCACHE | PME_WRITETHROUG)) ==
	    (PME_NOCACHE | PME_WRITETHROUG))
		panic("pmap_enter: uncached & writethrough");
#endif
	if (flags & PME_NOCACHE)
		/* Must be I/O mapping */
		tte |= TTE_I | TTE_G;
#ifdef PPC_4XX_NOCACHE
	tte |= TTE_I;
#else
	else if (flags & PME_WRITETHROUG)
		/* Uncached and writethrough are not compatible */
		tte |= TTE_W;
#endif
	if (pm == pmap_kernel())
		tte |= TTE_ZONE(ZONE_PRIV);
	else
		tte |= TTE_ZONE(ZONE_USER);

	if (flags & VM_PROT_WRITE)
		tte |= TTE_WR;

	/*
	 * Now record mapping for later back-translation.
	 */
	if (pmap_initialized && managed) {
		char *attr;

		if (!pmap_enter_pv(pm, va, pa)) {
			/* Could not enter pv on a managed page */
			return 1;
		}

		/* Now set attributes. */
		attr = pa_to_attr(pa);
#ifdef DIAGNOSTIC
		if (!attr)
			panic("managed but no attr");
#endif
		if (flags & VM_PROT_ALL)
			*attr |= PTE_HI_REF;
		if (flags & VM_PROT_WRITE)
			*attr |= PTE_HI_CHG;
	}

	s = splvm();
	pm->pm_stats.resident_count++;

	/* Insert page into page table. */
	pte_enter(pm, va, tte);

	/* If this is a real fault, enter it in the tlb */
	if (tte && ((flags & PMAP_WIRED) == 0)) {
		ppc4xx_tlb_enter(pm->pm_ctx, va, tte);
	}
	splx(s);

	/* Flush the real memory from the instruction cache. */
	if ((prot & VM_PROT_EXECUTE) && (tte & TTE_I) == 0)
		__syncicache((void *)pa, PAGE_SIZE);

	return 0;
}

void
pmap_unwire(struct pmap *pm, vaddr_t va)
{
	struct pv_entry *pv, *npv;
	paddr_t pa;
	int s;

	if (pm == NULL)
		return;

	if (!pmap_extract(pm, va, &pa))
		return;

	va |= PV_WIRED;

	pv = pa_to_pv(pa);
	if (!pv)
		return;

	s = splvm();
	/*
	 * Search the list for the entry; the wired flag is kept in the
	 * pv_va of the matching entry.
	 */
	for (npv = pv; (npv = pv->pv_next) != NULL; pv = npv) {
		if (pm == npv->pv_pm && PV_CMPVA(va, npv)) {
			npv->pv_va &= ~PV_WIRED;
			break;
		}
	}
	splx(s);
}

void
pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
{
	int s;
	u_int tte;
	struct pmap *pm = pmap_kernel();

	/*
	 * Have to remove any existing mapping first.
	 */

	/*
	 * Generate TTE.
	 *
	 * XXXX
	 *
	 * Since the kernel does not handle execution privileges properly,
	 * we will handle read and execute permissions together.
	 */
	tte = 0;
	if (prot & VM_PROT_ALL) {

		tte = TTE_PA(pa) | TTE_EX | TTE_ZONE(ZONE_PRIV);
		/* XXXX -- need to support multiple page sizes. */
		tte |= TTE_SZ_16K;
#ifdef DIAGNOSTIC
		if ((prot & (PME_NOCACHE | PME_WRITETHROUG)) ==
		    (PME_NOCACHE | PME_WRITETHROUG))
			panic("pmap_kenter_pa: uncached & writethrough");
#endif
		if (prot & PME_NOCACHE)
			/* Must be I/O mapping */
			tte |= TTE_I | TTE_G;
#ifdef PPC_4XX_NOCACHE
		tte |= TTE_I;
#else
		else if (prot & PME_WRITETHROUG)
			/* Uncached and writethrough are not compatible */
			tte |= TTE_W;
#endif
		if (prot & VM_PROT_WRITE)
			tte |= TTE_WR;
	}

	s = splvm();
	pm->pm_stats.resident_count++;

	/* Insert page into page table. */
	pte_enter(pm, va, tte);
	splx(s);
}

void
pmap_kremove(vaddr_t va, vsize_t len)
{

	while (len > 0) {
		pte_enter(pmap_kernel(), va, 0);
		va += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
}

/*
 * Remove the given range of mapping entries.
 */
void
pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
{
	int s;
	paddr_t pa;
	volatile u_int *ptp;

	s = splvm();
	while (va < endva) {

		if ((ptp = pte_find(pm, va)) && (pa = *ptp)) {
			pa = TTE_PA(pa);
			pmap_remove_pv(pm, va, pa);
			*ptp = 0;
			ppc4xx_tlb_flush(va, pm->pm_ctx);
			pm->pm_stats.resident_count--;
		}
		va += NBPG;
	}

	splx(s);
}
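
/*
 * Since a software PTE stores the physical page number directly, a
 * reverse lookup is just TTE_PA() on the stored PTE word plus the
 * offset bits of the VA; no hardware TLB search is required.
 */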
/*
 * Get the physical page address for the given pmap/virtual address.
 */
boolean_t
pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap)
{
	int seg = STIDX(va);
	int ptn = PTIDX(va);
	u_int pa = 0;
	int s = splvm();

	if (pm->pm_ptbl[seg] && (pa = pm->pm_ptbl[seg][ptn])) {
		*pap = TTE_PA(pa) | (va & PGOFSET);
	}
	splx(s);
	return (pa != 0);
}

/*
 * Lower the protection on the specified range of this pmap.
 *
 * There are only two cases: either the protection is going to 0,
 * or it is going to read-only.
 */
void
pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	volatile u_int *ptp;
	int s;

	if (prot & VM_PROT_READ) {
		s = splvm();
		while (sva < eva) {
			if ((ptp = pte_find(pm, sva)) != NULL) {
				*ptp &= ~TTE_WR;
				ppc4xx_tlb_flush(sva, pm->pm_ctx);
			}
			sva += NBPG;
		}
		splx(s);
		return;
	}
	pmap_remove(pm, sva, eva);
}

boolean_t
check_attr(struct vm_page *pg, u_int mask, int clear)
{
	paddr_t pa = VM_PAGE_TO_PHYS(pg);
	int s;
	char *attr;
	int rv;

	/*
	 * First modify bits in cache.
	 */
	s = splvm();
	attr = pa_to_attr(pa);
	if (attr == NULL) {
		splx(s);
		return FALSE;
	}

	rv = ((*attr & mask) != 0);
	if (clear) {
		*attr &= ~mask;
		pmap_page_protect(pg,
		    (mask == PTE_HI_CHG) ? VM_PROT_READ : 0);
	}
	splx(s);
	return rv;
}


/*
 * Lower the protection on the specified physical page.
 *
 * There are only two cases: either the protection is going to 0,
 * or it is going to read-only.
 */
void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	paddr_t pa = VM_PAGE_TO_PHYS(pg);
	vaddr_t va;
	struct pv_entry *pvh, *pv, *npv;
	struct pmap *pm;

	pvh = pa_to_pv(pa);
	if (pvh == NULL)
		return;

	/* Handle extra pvs which may be deleted in the operation */
	for (pv = pvh->pv_next; pv; pv = npv) {
		npv = pv->pv_next;

		pm = pv->pv_pm;
		va = pv->pv_va;
		pmap_protect(pm, va, va + NBPG, prot);
	}
	/* Now check the head pv */
	if (pvh->pv_pm) {
		pv = pvh;
		pm = pv->pv_pm;
		va = pv->pv_va;
		pmap_protect(pm, va, va + NBPG, prot);
	}
}

/*
 * Activate the address space for the specified process.  If the process
 * is the current process, load the new MMU context.
 */
void
pmap_activate(struct proc *p)
{
#if 0
	struct pcb *pcb = &p->p_addr->u_pcb;
	pmap_t pmap = p->p_vmspace->vm_map.pmap;

	/*
	 * XXX Normally performed in cpu_fork().
	 */
	printf("pmap_activate(%p), pmap=%p\n", p, pmap);
	if (pcb->pcb_pm != pmap) {
		pcb->pcb_pm = pmap;
		(void) pmap_extract(pmap_kernel(), (vaddr_t)pcb->pcb_pm,
		    (paddr_t *)&pcb->pcb_pmreal);
	}

	if (p == curproc) {
		/* Store pointer to new current pmap. */
		curpm = pcb->pcb_pmreal;
	}
#endif
}

/*
 * Deactivate the specified process's address space.
 */
void
pmap_deactivate(struct proc *p)
{
}
/*
 * Synchronize caches corresponding to [addr, addr+len) in p.
 */
void
pmap_procwr(struct proc *p, vaddr_t va, size_t len)
{
	struct pmap *pm = p->p_vmspace->vm_map.pmap;
	int msr, ctx, opid;


	/*
	 * Need to turn off IMMU and switch to user context.
	 * (icbi uses DMMU).
	 */
	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}
	__asm __volatile(
		"mfmsr %0;"		/* Save MSR */
		"li %1, 0x20;"		/* MSR[IR] */
		"andc %1,%0,%1;"	/* Turn off IMMU */
		"mtmsr %1;"
		"sync;isync;"
		"mfpid %1;"		/* Save old PID */
		"mtpid %2;"		/* Switch to user context */
		"sync; isync;"
		"1:"
		"dcbf 0,%3;"		/* Flush line from dcache */
		"icbi 0,%3;"		/* Invalidate it in icache */
		"addi %3,%3,32;"	/* Next cache line */
		"addic. %4,%4,-32;"
		"bge 1b;"
		"mtpid %1;"		/* Restore PID and MSR */
		"mtmsr %0;"
		"sync; isync"
		: "=&r" (msr), "=&r" (opid)
		: "r" (ctx), "r" (va), "r" (len));
}


/* This has to be done in real mode !!! */
void
ppc4xx_tlb_flush(vaddr_t va, int pid)
{
	u_long i, found;
	u_long msr;

	/* If there's no context then it can't be mapped. */
	if (!pid)
		return;

	asm("mfpid %1;"			/* Save PID */
	    "mfmsr %2;"			/* Save MSR */
	    "li %0,0;"			/* Now clear MSR */
	    "mtmsr %0;"
	    "mtpid %4;"			/* Set PID */
	    "sync;"
	    "tlbsx. %0,0,%3;"		/* Search TLB */
	    "sync;"
	    "mtpid %1;"			/* Restore PID */
	    "mtmsr %2;"			/* Restore MSR */
	    "sync;isync;"
	    "li %1,1;"
	    "beq 1f;"
	    "li %1,0;"
	    "1:"
	    : "=&r" (i), "=&r" (found), "=&r" (msr)
	    : "r" (va), "r" (pid));
	if (found && !TLB_LOCKED(i)) {

		/* Now flush translation */
		asm volatile(
		    "tlbwe %0,%1,0;"
		    "sync;isync;"
		    : : "r" (0), "r" (i));

		tlb_info[i].ti_ctx = 0;
		tlb_info[i].ti_flags = 0;
		tlbnext = i;
		/* Successful flushes */
		tlbflush_ev.ev_count++;
	}
}

void
ppc4xx_tlb_flush_all(void)
{
	u_long i;

	for (i = 0; i < NTLB; i++)
		if (!TLB_LOCKED(i)) {
			asm volatile(
			    "tlbwe %0,%1,0;"
			    "sync;isync;"
			    : : "r" (0), "r" (i));
			tlb_info[i].ti_ctx = 0;
			tlb_info[i].ti_flags = 0;
		}

	asm volatile("sync;isync");
}

/* Find a TLB entry to evict. */
static int
ppc4xx_tlb_find_victim(void)
{
	int flags;

	for (;;) {
		if (++tlbnext >= NTLB)
			tlbnext = TLB_NRESERVED;
		flags = tlb_info[tlbnext].ti_flags;
		if (!(flags & TLBF_USED) ||
		    (flags & (TLBF_LOCKED | TLBF_REF)) == 0) {
			u_long va, stack = (u_long)&va;

			if (!((tlb_info[tlbnext].ti_va ^ stack) &
			    (~PGOFSET)) &&
			    (tlb_info[tlbnext].ti_ctx == KERNEL_PID) &&
			    (flags & TLBF_USED)) {
				/* Kernel stack page */
				flags |= TLBF_USED;
				tlb_info[tlbnext].ti_flags = flags;
			} else {
				/* Found it! */
				return (tlbnext);
			}
		} else {
			tlb_info[tlbnext].ti_flags = (flags & ~TLBF_REF);
		}
	}
}

void
ppc4xx_tlb_enter(int ctx, vaddr_t va, u_int pte)
{
	u_long th, tl, idx;
	tlbpid_t pid;
	u_short msr;
	paddr_t pa;
	int s, sz;

	tlbenter_ev.ev_count++;

	sz = (pte & TTE_SZ_MASK) >> TTE_SZ_SHIFT;
	pa = (pte & TTE_RPN_MASK(sz));
	th = (va & TLB_EPN_MASK) | (sz << TLB_SIZE_SHFT) | TLB_VALID;
	tl = (pte & ~TLB_RPN_MASK) | pa;
	tl |= ppc4xx_tlbflags(va, pa);

	s = splhigh();
	idx = ppc4xx_tlb_find_victim();

#ifdef DIAGNOSTIC
	if ((idx < TLB_NRESERVED) || (idx >= NTLB)) {
		panic("ppc4xx_tlb_enter: replacing entry %ld", idx);
	}
#endif

	tlb_info[idx].ti_va = (va & TLB_EPN_MASK);
	tlb_info[idx].ti_ctx = ctx;
	tlb_info[idx].ti_flags = TLBF_USED | TLBF_REF;
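
	/*
	 * Program the entry in real mode: translation is disabled so
	 * the tlbwe pair cannot fault, and the PID is temporarily
	 * switched to the target context so the entry is tagged with
	 * the right translation ID before PID and MSR are restored.
	 */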
	asm volatile(
	    "mfmsr %0;"			/* Save MSR */
	    "li %1,0;"
	    "tlbwe %1,%3,0;"		/* Invalidate old entry */
	    "mtmsr %1;"			/* Clear MSR */
	    "mfpid %1;"			/* Save old PID */
	    "mtpid %2;"			/* Load translation ctx */
	    "sync; isync;"
#ifdef DEBUG
	    "andi. %3,%3,63;"
	    "tweqi %3,0;"		/* XXXXX DEBUG trap on index 0 */
#endif
	    "tlbwe %4,%3,1; tlbwe %5,%3,0;"	/* Set TLB */
	    "sync; isync;"
	    "mtpid %1; mtmsr %0;"	/* Restore PID and MSR */
	    "sync; isync;"
	    : "=&r" (msr), "=&r" (pid)
	    : "r" (ctx), "r" (idx), "r" (tl), "r" (th));
	splx(s);
}

void
ppc4xx_tlb_unpin(int i)
{

	if (i == -1)
		for (i = 0; i < TLB_NRESERVED; i++)
			tlb_info[i].ti_flags &= ~TLBF_LOCKED;
	else
		tlb_info[i].ti_flags &= ~TLBF_LOCKED;
}

void
ppc4xx_tlb_init(void)
{
	int i;

	/* Mark reserved TLB entries */
	for (i = 0; i < TLB_NRESERVED; i++) {
		tlb_info[i].ti_flags = TLBF_LOCKED | TLBF_USED;
		tlb_info[i].ti_ctx = KERNEL_PID;
	}

	/*
	 * Set up security zones:
	 * Z0 - accessible by kernel only if TLB entry permissions allow
	 * Z1,Z2 - access is controlled by TLB entry permissions
	 * Z3 - full access regardless of TLB entry permissions
	 */
	asm volatile(
	    "mtspr %0,%1;"
	    "sync;"
	    :: "K"(SPR_ZPR), "r" (0x1b000000));
}


/*
 * We should pass the ctx in from trap code.
 */
int
pmap_tlbmiss(vaddr_t va, int ctx)
{
	volatile u_int *pte;
	u_long tte;

	tlbmiss_ev.ev_count++;

	/*
	 * XXXX We will reserve 0-0x80000000 for va==pa mappings.
	 */
	if (ctx != KERNEL_PID || (va & 0x80000000)) {
		pte = pte_find((struct pmap *)ctxbusy[ctx], va);
		if (pte == NULL) {
			/* Map unmanaged addresses directly for kernel access */
			return 1;
		}
		tte = *pte;
		if (tte == 0) {
			return 1;
		}
	} else {
		/* Create a 16MB writeable mapping. */
#ifdef PPC_4XX_NOCACHE
		tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M |
		    TTE_I | TTE_WR;
#else
		tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_WR;
#endif
	}
	tlbhit_ev.ev_count++;
	ppc4xx_tlb_enter(ctx, va, tte);

	return 0;
}

/*
 * Flush all the entries matching a context from the TLB.
 */
static int
ctx_flush(int cnum)
{
	int i;

	/* We gotta steal this context */
	for (i = TLB_NRESERVED; i < NTLB; i++) {
		if (tlb_info[i].ti_ctx == cnum) {
			/* Can't steal ctx if it has a locked entry. */
			if (TLB_LOCKED(i)) {
#ifdef DIAGNOSTIC
				printf("ctx_flush: can't invalidate "
				    "locked mapping %d "
				    "for context %d\n", i, cnum);
#ifdef DDB
				Debugger();
#endif
#endif
				return (1);
			}
#ifdef DIAGNOSTIC
			if (i < TLB_NRESERVED)
				panic("TLB entry %d not locked", i);
#endif
			/* Invalidate this particular TLB entry */
			asm volatile("tlbwe %0,%1,0" : : "r" (0), "r" (i));
			tlb_info[i].ti_flags = 0;
		}
	}
	return (0);
}

/*
 * Allocate a context.  If necessary, steal one from someone else.
 *
 * The new context is flushed from the TLB before returning.
 */
int
ctx_alloc(struct pmap *pm)
{
	int s, cnum;
	static int next = MINCTX;

	if (pm == pmap_kernel()) {
#ifdef DIAGNOSTIC
		printf("ctx_alloc: kernel pmap!\n");
#endif
		return (0);
	}
	s = splvm();
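
	/*
	 * Contexts 0 and 1 belong to the kernel and are never handed
	 * out; the search below proceeds round-robin from the last
	 * context allocated, stealing a victim if all are in use.
	 */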
	/* Find a likely context. */
	cnum = next;
	do {
		if ((++cnum) >= NUMCTX)
			cnum = MINCTX;
	} while (ctxbusy[cnum] != NULL && cnum != next);

	/* Now clean it out */
 oops:
	if (cnum < MINCTX)
		cnum = MINCTX;	/* Never steal ctx 0 or 1 */
	if (ctx_flush(cnum)) {
		/* oops -- something's wired. */
		if ((++cnum) >= NUMCTX)
			cnum = MINCTX;
		goto oops;
	}

	if (ctxbusy[cnum]) {
#ifdef DEBUG
		/* We should identify this pmap and clear it */
		printf("Warning: stealing context %d\n", cnum);
#endif
		ctxbusy[cnum]->pm_ctx = 0;
	}
	ctxbusy[cnum] = pm;
	next = cnum;
	splx(s);
	pm->pm_ctx = cnum;

	return cnum;
}

/*
 * Give away a context.
 */
void
ctx_free(struct pmap *pm)
{
	int oldctx;

	oldctx = pm->pm_ctx;

	if (oldctx == 0)
		panic("ctx_free: freeing kernel context");
#ifdef DIAGNOSTIC
	if (ctxbusy[oldctx] == 0)
		printf("ctx_free: freeing free context %d\n", oldctx);
	if (ctxbusy[oldctx] != pm) {
		printf("ctx_free: freeing someone else's context\n "
		    "ctxbusy[%d] = %p, pm->pm_ctx = %p\n",
		    oldctx, (void *)(u_long)ctxbusy[oldctx], pm);
#ifdef DDB
		Debugger();
#endif
	}
#endif
	/* We should verify it has not been stolen and reallocated... */
	ctxbusy[oldctx] = NULL;
	ctx_flush(oldctx);
}


#ifdef DEBUG
/*
 * Test ref/modify handling.
 */
void pmap_testout __P((void));
void
pmap_testout(void)
{
	vaddr_t va;
	volatile int *loc;
	int val = 0;
	paddr_t pa;
	struct vm_page *pg;
	int ref, mod;

	/* Allocate a page */
	va = (vaddr_t)uvm_km_alloc1(kernel_map, NBPG, 1);
	loc = (int *)va;

	pmap_extract(pmap_kernel(), va, &pa);
	pg = PHYS_TO_VM_PAGE(pa);
	pmap_unwire(pmap_kernel(), va);

	pmap_remove(pmap_kernel(), va, va + 1);
	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
	pmap_update(pmap_kernel());

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	    (void *)(u_long)va, (long)pa, ref, mod);

	/* Check it's properly cleared */
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Checking cleared page: ref %d, mod %d\n", ref, mod);

	/* Reference page */
	val = *loc;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Referenced page: ref %d, mod %d val %x\n", ref, mod, val);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	    (void *)(u_long)va, (long)pa, ref, mod);

	/* Modify page */
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n", ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	    (void *)(u_long)va, (long)pa, ref, mod);

	/* Check it's properly cleared */
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Checking cleared page: ref %d, mod %d\n", ref, mod);

	/* Modify page */
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n", ref, mod);

	/* Check pmap_protect() */
	pmap_protect(pmap_kernel(), va, va + 1, VM_PROT_READ);
	pmap_update(pmap_kernel());
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("pmap_protect(VM_PROT_READ): ref %d, mod %d\n", ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	    (void *)(u_long)va, (long)pa, ref, mod);

	/* Reference page */
	val = *loc;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Referenced page: ref %d, mod %d val %x\n", ref, mod, val);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	    (void *)(u_long)va, (long)pa, ref, mod);

	/* Modify page */
#if 0
	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
	pmap_update(pmap_kernel());
#endif
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n", ref, mod);

	/* Check pmap_protect() */
	pmap_protect(pmap_kernel(), va, va + 1, VM_PROT_NONE);
	pmap_update(pmap_kernel());
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("pmap_protect(): ref %d, mod %d\n", ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	    (void *)(u_long)va, (long)pa, ref, mod);

	/* Reference page */
	val = *loc;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Referenced page: ref %d, mod %d val %x\n", ref, mod, val);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	    (void *)(u_long)va, (long)pa, ref, mod);

	/* Modify page */
#if 0
	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
	pmap_update(pmap_kernel());
#endif
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n", ref, mod);

	/* Check pmap_page_protect() */
	pmap_page_protect(pg, VM_PROT_READ);
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("pmap_page_protect(VM_PROT_READ): ref %d, mod %d\n",
	    ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	    (void *)(u_long)va, (long)pa, ref, mod);

	/* Reference page */
	val = *loc;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Referenced page: ref %d, mod %d val %x\n", ref, mod, val);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	    (void *)(u_long)va, (long)pa, ref, mod);

	/* Modify page */
#if 0
	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
	pmap_update(pmap_kernel());
#endif
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n", ref, mod);

	/* Check pmap_page_protect() */
	pmap_page_protect(pg, VM_PROT_NONE);
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("pmap_page_protect(): ref %d, mod %d\n", ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	    (void *)(u_long)va, (long)pa, ref, mod);


	/* Reference page */
	val = *loc;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Referenced page: ref %d, mod %d val %x\n", ref, mod, val);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	    (void *)(u_long)va, (long)pa, ref, mod);

	/* Modify page */
#if 0
	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
	pmap_update(pmap_kernel());
#endif
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n", ref, mod);

	/* Unmap page */
	pmap_remove(pmap_kernel(), va, va + 1);
	pmap_update(pmap_kernel());
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Unmapped page: ref %d, mod %d\n", ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	    (void *)(u_long)va, (long)pa, ref, mod);

	/* Check it's properly cleared */
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Checking cleared page: ref %d, mod %d\n", ref, mod);

	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL,
	    VM_PROT_ALL|PMAP_WIRED);
	uvm_km_free(kernel_map, (vaddr_t)va, NBPG);
}
#endif