/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)pmap.c	7.7 (Berkeley) 09/12/91
 */

/*
 * HP9000/300 series physical map management code.
 * For 68020/68030 machines with HP, 68851, or 68030 MMUs
 * (models 320,350,318,319,330,340,360,370,345,375)
 * Don't even pay lip service to multiprocessor support.
 *
 * XXX will only work for PAGE_SIZE == NBPG (hppagesperpage == 1)
 * right now because of the assumed one-to-one relationship of PT
 * pages to STEs.
 */

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this
 * module is called upon to provide software-use-only
 * maps which may or may not be stored in the same
 * form as hardware maps.  These pseudo-maps are
 * used to store intermediate results from copy
 * operations to and from address spaces.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time.  However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduced protection
 * operations until such time as they are actually
 * necessary.  This module is given full information as
 * to which processors are currently using which maps,
 * and to when physical maps must be made correct.
 */

#include "param.h"
#include "proc.h"
#include "malloc.h"
#include "user.h"

#include "pte.h"

#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "vm/vm_page.h"

#include "../include/cpu.h"

/*
 * Allocate various and sundry SYSMAPs used in the days of old VM
 * and not yet converted.  XXX.
 */
#define BSDVM_COMPAT	1

#ifdef DEBUG
struct {
	int collectscans;
	int collectpages;
	int kpttotal;
	int kptinuse;
	int kptmaxuse;
} kpt_stats;
struct {
	int kernel;	/* entering kernel mapping */
	int user;	/* entering user mapping */
	int ptpneeded;	/* needed to allocate a PT page */
	int pwchange;	/* no mapping change, just wiring or protection */
	int wchange;	/* no mapping change, just wiring */
	int mchange;	/* was mapped but mapping to different page */
	int managed;	/* a managed page */
	int firstpv;	/* first mapping for this PA */
	int secondpv;	/* second mapping for this PA */
	int ci;		/* cache inhibited */
	int unmanaged;	/* not a managed page */
	int flushes;	/* cache flushes */
} enter_stats;
struct {
	int calls;
	int removes;
	int pvfirst;
	int pvsearch;
	int ptinvalid;
	int uflushes;
	int sflushes;
} remove_stats;

int debugmap = 0;
int pmapdebug = 0x2000;
#define PDB_FOLLOW	0x0001
#define PDB_INIT	0x0002
#define PDB_ENTER	0x0004
#define PDB_REMOVE	0x0008
#define PDB_CREATE	0x0010
#define PDB_PTPAGE	0x0020
#define PDB_CACHE	0x0040
#define PDB_BITS	0x0080
#define PDB_COLLECT	0x0100
#define PDB_PROTECT	0x0200
#define PDB_SEGTAB	0x0400
#define PDB_PARANOIA	0x2000
#define PDB_WIRING	0x4000
#define PDB_PVDUMP	0x8000

int pmapvacflush = 0;
#define PVF_ENTER	0x01
#define PVF_REMOVE	0x02
#define PVF_PROTECT	0x04
#define PVF_TOTAL	0x80

extern vm_offset_t pager_sva, pager_eva;
#endif

/*
 * Get STEs and PTEs for user/kernel address space
 */
#define pmap_ste(m, v)	(&((m)->pm_stab[(vm_offset_t)(v) >> SG_ISHIFT]))
#define pmap_pte(m, v)	(&((m)->pm_ptab[(vm_offset_t)(v) >> PG_SHIFT]))

#define pmap_pte_pa(pte)	(*(int *)(pte) & PG_FRAME)

#define pmap_ste_v(pte)		((pte)->sg_v)
#define pmap_pte_w(pte)		((pte)->pg_w)
#define pmap_pte_ci(pte)	((pte)->pg_ci)
#define pmap_pte_m(pte)		((pte)->pg_m)
#define pmap_pte_u(pte)		((pte)->pg_u)
#define pmap_pte_v(pte)		((pte)->pg_v)
#define pmap_pte_set_w(pte, v)		((pte)->pg_w = (v))
#define pmap_pte_set_prot(pte, v)	((pte)->pg_prot = (v))

/*
 * Given a map and a machine independent protection code,
 * convert to an hp300 protection code.
 */
#define pte_prot(m, p)	(protection_codes[p])
int protection_codes[8];

/*
 * Kernel page table page management.
 */
struct kpt_page {
	struct kpt_page *kpt_next;	/* link on either used or free list */
	vm_offset_t	kpt_va;		/* always valid kernel VA */
	vm_offset_t	kpt_pa;		/* PA of this page (for speed) */
};
struct kpt_page *kpt_free_list, *kpt_used_list;
struct kpt_page *kpt_pages;

/*
 * Kernel segment/page table and page table map.
 * The page table map gives us a level of indirection we need to dynamically
 * expand the page table.  It is essentially a copy of the segment table
 * with PTEs instead of STEs.  All are initialized in locore at boot time.
 * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs.
 * Segtabzero is an empty segment table which all processes share until they
 * reference something.
 */
st_entry_t	*Sysseg;
pt_entry_t	*Sysmap, *Sysptmap;
st_entry_t	*Segtabzero;
#if BSDVM_COMPAT
vm_size_t	Sysptsize = VM_KERNEL_PT_PAGES + 4 / NPTEPG;
#else
vm_size_t	Sysptsize = VM_KERNEL_PT_PAGES;
#endif

struct pmap	kernel_pmap_store;
pmap_t		kernel_pmap;
vm_map_t	pt_map;

vm_offset_t	avail_start;	/* PA of first available physical page */
vm_offset_t	avail_end;	/* PA of last available physical page */
vm_size_t	mem_size;	/* memory size in bytes */
vm_offset_t	virtual_avail;	/* VA of first avail page (after kernel bss)*/
vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
vm_offset_t	vm_first_phys;	/* PA of first managed page */
vm_offset_t	vm_last_phys;	/* PA just past last managed page */
int		hppagesperpage;	/* PAGE_SIZE / HP_PAGE_SIZE */
boolean_t	pmap_initialized = FALSE;	/* Has pmap_init completed? */
int		pmap_aliasmask;	/* separation at which VA aliasing ok */
char		*pmap_attributes;	/* reference and modify bits */

boolean_t	pmap_testbit();
void		pmap_enter_ptpage();

#if BSDVM_COMPAT
#include "msgbuf.h"

/*
 * All those kernel PT submaps that BSD is so fond of
 */
struct pte	*CMAP1, *CMAP2, *mmap;
caddr_t		CADDR1, CADDR2, vmmap;
struct pte	*msgbufmap;
struct msgbuf	*msgbufp;
#endif

/*
 * Bootstrap the system enough to run with virtual memory.
 * Map the kernel's code and data, and allocate the system page table.
 *
 * On the HP this is called after mapping has already been enabled
 * and just syncs the pmap module with what has already been done.
 * [We can't call it easily with mapping off since the kernel is not
 * mapped with PA == VA, hence we would have to relocate every address
 * from the linked base (virtual) address 0 to the actual (physical)
 * address of 0xFFxxxxxx.]
 */
void
pmap_bootstrap(firstaddr, loadaddr)
	vm_offset_t firstaddr;
	vm_offset_t loadaddr;
{
#if BSDVM_COMPAT
	vm_offset_t va;
	struct pte *pte;
#endif
	extern vm_offset_t maxmem, physmem;

	avail_start = firstaddr;
	avail_end = maxmem << PGSHIFT;

	/* XXX: allow for msgbuf */
	avail_end -= hp300_round_page(sizeof(struct msgbuf));

	mem_size = physmem << PGSHIFT;
	virtual_avail = VM_MIN_KERNEL_ADDRESS + (firstaddr - loadaddr);
	virtual_end = VM_MAX_KERNEL_ADDRESS;
	hppagesperpage = PAGE_SIZE / HP_PAGE_SIZE;

	/*
	 * Determine VA aliasing distance if any
	 */
	if (ectype == EC_VIRT)
		switch (machineid) {
		case HP_320:
			pmap_aliasmask = 0x3fff;	/* 16k */
			break;
		case HP_350:
			pmap_aliasmask = 0x7fff;	/* 32k */
			break;
		}

	/*
	 * Initialize protection array.
	 */
	hp300_protection_init();

	/*
	 * The kernel's pmap is statically allocated so we don't
	 * have to use pmap_create, which is unlikely to work
	 * correctly at this part of the boot sequence.
	 */
	kernel_pmap = &kernel_pmap_store;

	/*
	 * Kernel page/segment table allocated in locore,
	 * just initialize pointers.
	 */
	kernel_pmap->pm_stab = Sysseg;
	kernel_pmap->pm_ptab = Sysmap;

	simple_lock_init(&kernel_pmap->pm_lock);
	kernel_pmap->pm_count = 1;

#if BSDVM_COMPAT
	/*
	 * Allocate all the submaps we need
	 */
#define SYSMAP(c, p, v, n) \
	v = (c)va; va += ((n)*HP_PAGE_SIZE); p = pte; pte += (n);

	va = virtual_avail;
	pte = pmap_pte(kernel_pmap, va);

	SYSMAP(caddr_t		,CMAP1		,CADDR1	,1	)
	SYSMAP(caddr_t		,CMAP2		,CADDR2	,1	)
	SYSMAP(caddr_t		,mmap		,vmmap	,1	)
	SYSMAP(struct msgbuf *	,msgbufmap	,msgbufp,1	)
	virtual_avail = va;
#endif
}

/*
 * Bootstrap memory allocator. This function allows for early dynamic
 * memory allocation until the virtual memory system has been bootstrapped.
 * After that point, either kmem_alloc or malloc should be used. This
 * function works by stealing pages from the (to be) managed page pool,
 * stealing virtual address space, then mapping the pages and zeroing them.
 *
 * It should be used from pmap_bootstrap till vm_page_startup, afterwards
 * it cannot be used, and will generate a panic if tried. Note that this
 * memory will never be freed, and in essence it is wired down.
 */
void *
pmap_bootstrap_alloc(size) {
	vm_offset_t val;
	int i;
	extern boolean_t vm_page_startup_initialized;

	if (vm_page_startup_initialized)
		panic("pmap_bootstrap_alloc: called after startup initialized");
	size = round_page(size);
	val = virtual_avail;

	virtual_avail = pmap_map(virtual_avail, avail_start,
		avail_start + size, VM_PROT_READ|VM_PROT_WRITE);
	avail_start += size;

	blkclr((caddr_t)val, size);
	return ((void *)val);
}

/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 */
void
pmap_init(phys_start, phys_end)
	vm_offset_t phys_start, phys_end;
{
	vm_offset_t addr, addr2;
	vm_size_t npg, s;
	int rv;
	extern char kstack[];

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
#endif
	/*
	 * Now that kernel map has been allocated, we can mark as
	 * unavailable regions which we have mapped in locore.
	 */
	addr = (vm_offset_t) intiobase;
	(void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
			   &addr, hp300_ptob(IIOMAPSIZE+EIOMAPSIZE), FALSE);
	if (addr != (vm_offset_t)intiobase)
		goto bogons;
	addr = (vm_offset_t) Sysmap;
	vm_object_reference(kernel_object);
	(void) vm_map_find(kernel_map, kernel_object, addr,
			   &addr, HP_MAX_PTSIZE, FALSE);
	/*
	 * If this fails it is probably because the static portion of
	 * the kernel page table isn't big enough and we overran the
	 * page table map.  Need to adjust pmap_size() in hp300_init.c.
	 */
	if (addr != (vm_offset_t)Sysmap)
		goto bogons;

	addr = (vm_offset_t) kstack;
	vm_object_reference(kernel_object);
	(void) vm_map_find(kernel_map, kernel_object, addr,
			   &addr, hp300_ptob(UPAGES), FALSE);
	if (addr != (vm_offset_t)kstack)
bogons:
		panic("pmap_init: bogons in the VM system!\n");

#ifdef DEBUG
	if (pmapdebug & PDB_INIT) {
		printf("pmap_init: Sysseg %x, Sysmap %x, Sysptmap %x\n",
		       Sysseg, Sysmap, Sysptmap);
		printf(" pstart %x, pend %x, vstart %x, vend %x\n",
		       avail_start, avail_end, virtual_avail, virtual_end);
	}
#endif

	/*
	 * Allocate memory for random pmap data structures.  Includes the
	 * initial segment table, pv_head_table and pmap_attributes.
	 */
	npg = atop(phys_end - phys_start);
	s = (vm_size_t) (HP_STSIZE + sizeof(struct pv_entry) * npg + npg);
	s = round_page(s);
	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
	Segtabzero = (st_entry_t *) addr;
	addr += HP_STSIZE;
	pv_table = (pv_entry_t) addr;
	addr += sizeof(struct pv_entry) * npg;
	pmap_attributes = (char *) addr;
#ifdef DEBUG
	if (pmapdebug & PDB_INIT)
		printf("pmap_init: %x bytes (%x pgs): seg %x tbl %x attr %x\n",
		       s, npg, Segtabzero, pv_table, pmap_attributes);
#endif

	/*
	 * Allocate physical memory for kernel PT pages and their management.
	 * We need 1 PT page per possible task plus some slop.
	 */
	npg = min(atop(HP_MAX_KPTSIZE), maxproc+16);
	s = ptoa(npg) + round_page(npg * sizeof(struct kpt_page));

	/*
	 * Verify that space will be allocated in region for which
	 * we already have kernel PT pages.
	 */
	addr = 0;
	rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
	if (rv != KERN_SUCCESS || addr + s >= (vm_offset_t)Sysmap)
		panic("pmap_init: kernel PT too small");
	vm_map_remove(kernel_map, addr, addr + s);

	/*
	 * Now allocate the space and link the pages together to
	 * form the KPT free list.
	 */
	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
	s = ptoa(npg);
	addr2 = addr + s;
	kpt_pages = &((struct kpt_page *)addr2)[npg];
	kpt_free_list = (struct kpt_page *) 0;
	do {
		addr2 -= HP_PAGE_SIZE;
		(--kpt_pages)->kpt_next = kpt_free_list;
		kpt_free_list = kpt_pages;
		kpt_pages->kpt_va = addr2;
		kpt_pages->kpt_pa = pmap_extract(kernel_pmap, addr2);
	} while (addr != addr2);
#ifdef DEBUG
	kpt_stats.kpttotal = atop(s);
	if (pmapdebug & PDB_INIT)
		printf("pmap_init: KPT: %d pages from %x to %x\n",
		       atop(s), addr, addr + s);
#endif

	/*
	 * Slightly modified version of kmem_suballoc() to get page table
	 * map where we want it.
	 */
	addr = HP_PTBASE;
	s = min(HP_PTMAXSIZE, maxproc*HP_MAX_PTSIZE);
	addr2 = addr + s;
	rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
	if (rv != KERN_SUCCESS)
		panic("pmap_init: cannot allocate space for PT map");
	pmap_reference(vm_map_pmap(kernel_map));
	pt_map = vm_map_create(vm_map_pmap(kernel_map), addr, addr2, TRUE);
	if (pt_map == NULL)
		panic("pmap_init: cannot create pt_map");
	rv = vm_map_submap(kernel_map, addr, addr2, pt_map);
	if (rv != KERN_SUCCESS)
		panic("pmap_init: cannot map range to pt_map");
#ifdef DEBUG
	if (pmapdebug & PDB_INIT)
		printf("pmap_init: pt_map [%x - %x)\n", addr, addr2);
#endif

	/*
	 * Now it is safe to enable pv_table recording.
	 */
	vm_first_phys = phys_start;
	vm_last_phys = phys_end;
	pmap_initialized = TRUE;
}

/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * For now, VM is already on, we only need to map the
 * specified memory.
 */
vm_offset_t
pmap_map(virt, start, end, prot)
	vm_offset_t virt;
	vm_offset_t start;
	vm_offset_t end;
	int prot;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
#endif
	while (start < end) {
		pmap_enter(kernel_pmap, virt, start, prot, FALSE);
		virt += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	return(virt);
}

/*
 * Create and return a physical map.
 *
 * If the size specified for the map
 * is zero, the map is an actual physical
 * map, and may be referenced by the
 * hardware.
 *
 * If the size specified is non-zero,
 * the map will be used in software only, and
 * is bounded by that size.
 */
pmap_t
pmap_create(size)
	vm_size_t size;
{
	register pmap_t pmap;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_create(%x)\n", size);
#endif
	/*
	 * Software use map does not need a pmap
	 */
	if (size)
		return(NULL);

	/* XXX: is it ok to wait here? */
	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
#ifdef notifwewait
	if (pmap == NULL)
		panic("pmap_create: cannot allocate a pmap");
#endif
	bzero(pmap, sizeof(*pmap));
	pmap_pinit(pmap);
	return (pmap);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
void
pmap_pinit(pmap)
	register struct pmap *pmap;
{

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_pinit(%x)\n", pmap);
#endif
	/*
	 * No need to allocate page table space yet but we do need a
	 * valid segment table.  Initially, we point everyone at the
	 * "null" segment table.  On the first pmap_enter, a real
	 * segment table will be allocated.
	 */
	pmap->pm_stab = Segtabzero;
	pmap->pm_stchanged = TRUE;
	pmap->pm_count = 1;
	simple_lock_init(&pmap->pm_lock);
}

/*
 * Retire the given physical map from service.
 * Should only be called if the map contains
 * no valid mappings.
 */
void
pmap_destroy(pmap)
	register pmap_t pmap;
{
	int count;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_destroy(%x)\n", pmap);
#endif
	if (pmap == NULL)
		return;

	simple_lock(&pmap->pm_lock);
	count = --pmap->pm_count;
	simple_unlock(&pmap->pm_lock);
	if (count == 0) {
		pmap_release(pmap);
		free((caddr_t)pmap, M_VMPMAP);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap)
	register struct pmap *pmap;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_release(%x)\n", pmap);
#endif
#ifdef notdef /* DIAGNOSTIC */
	/* count would be 0 from pmap_destroy... */
	simple_lock(&pmap->pm_lock);
	if (pmap->pm_count != 1)
		panic("pmap_release count");
#endif
	if (pmap->pm_ptab)
		kmem_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
				 HP_MAX_PTSIZE);
	if (pmap->pm_stab != Segtabzero)
		kmem_free(kernel_map, (vm_offset_t)pmap->pm_stab, HP_STSIZE);
}

/*
 * Add a reference to the specified pmap.
 */
void
pmap_reference(pmap)
	pmap_t pmap;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_reference(%x)\n", pmap);
#endif
	if (pmap != NULL) {
		simple_lock(&pmap->pm_lock);
		pmap->pm_count++;
		simple_unlock(&pmap->pm_lock);
	}
}

/*
 * Remove the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly
 * rounded to the page size.
 */
void
pmap_remove(pmap, sva, eva)
	register pmap_t pmap;
	vm_offset_t sva, eva;
{
	register vm_offset_t pa, va;
	register pt_entry_t *pte;
	register pv_entry_t pv, npv;
	register int ix;
	pmap_t ptpmap;
	int *ste, s, bits;
	boolean_t firstpage = TRUE;
	boolean_t flushcache = FALSE;
#ifdef DEBUG
	pt_entry_t opte;

	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
		printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
#endif

	if (pmap == NULL)
		return;

#ifdef DEBUG
	remove_stats.calls++;
#endif
	for (va = sva; va < eva; va += PAGE_SIZE) {
		/*
		 * Weed out invalid mappings.
		 * Note: we assume that the segment table is always allocated.
		 */
		if (!pmap_ste_v(pmap_ste(pmap, va))) {
			/* XXX: avoid address wrap around */
			if (va >= hp300_trunc_seg((vm_offset_t)-1))
				break;
			va = hp300_round_seg(va + PAGE_SIZE) - PAGE_SIZE;
			continue;
		}
		pte = pmap_pte(pmap, va);
		pa = pmap_pte_pa(pte);
		if (pa == 0)
			continue;
		/*
		 * Invalidating a non-CI page, must flush external VAC
		 * unless it is a supervisor mapping and we have already
		 * flushed the supervisor side.
		 */
		if (pmap_aliasmask && !pmap_pte_ci(pte) &&
		    !(pmap == kernel_pmap && firstpage))
			flushcache = TRUE;
#ifdef DEBUG
		opte = *pte;
		remove_stats.removes++;
#endif
		/*
		 * Update statistics
		 */
		if (pmap_pte_w(pte))
			pmap->pm_stats.wired_count--;
		pmap->pm_stats.resident_count--;

		/*
		 * Invalidate the PTEs.
		 * XXX: should cluster them up and invalidate as many
		 * as possible at once.
		 */
#ifdef DEBUG
		if (pmapdebug & PDB_REMOVE)
			printf("remove: invalidating %x ptes at %x\n",
			       hppagesperpage, pte);
#endif
		/*
		 * Flush VAC to ensure we get the correct state of any
		 * hardware maintained bits.
		 */
		if (firstpage && pmap_aliasmask) {
			firstpage = FALSE;
			if (pmap == kernel_pmap)
				flushcache = FALSE;
			DCIS();
#ifdef DEBUG
			remove_stats.sflushes++;
#endif
		}
		bits = ix = 0;
		do {
			bits |= *(int *)pte & (PG_U|PG_M);
			*(int *)pte++ = PG_NV;
			TBIS(va + ix * HP_PAGE_SIZE);
		} while (++ix != hppagesperpage);

		/*
		 * For user mappings decrement the wiring count on
		 * the PT page.  We do this after the PTE has been
		 * invalidated because vm_map_pageable winds up in
		 * pmap_pageable which clears the modify bit for the
		 * PT page.
		 */
		if (pmap != kernel_pmap) {
			pte = pmap_pte(pmap, va);
			vm_map_pageable(pt_map, trunc_page(pte),
					round_page(pte+1), TRUE);
#ifdef DEBUG
			if (pmapdebug & PDB_WIRING)
				pmap_check_wiring("remove", trunc_page(pte));
#endif
		}
		/*
		 * Remove from the PV table (raise IPL since we
		 * may be called at interrupt time).
		 */
		if (pa < vm_first_phys || pa >= vm_last_phys)
			continue;
		pv = pa_to_pvh(pa);
		ste = (int *)0;
		s = splimp();
		/*
		 * If it is the first entry on the list, it is actually
		 * in the header and we must copy the following entry up
		 * to the header.  Otherwise we must search the list for
		 * the entry.  In either case we free the now unused entry.
		 */
		if (pmap == pv->pv_pmap && va == pv->pv_va) {
			ste = (int *)pv->pv_ptste;
			ptpmap = pv->pv_ptpmap;
			npv = pv->pv_next;
			if (npv) {
				*pv = *npv;
				free((caddr_t)npv, M_VMPVENT);
			} else
				pv->pv_pmap = NULL;
#ifdef DEBUG
			remove_stats.pvfirst++;
#endif
		} else {
			for (npv = pv->pv_next; npv; npv = npv->pv_next) {
#ifdef DEBUG
				remove_stats.pvsearch++;
#endif
				if (pmap == npv->pv_pmap && va == npv->pv_va)
					break;
				pv = npv;
			}
#ifdef DEBUG
			if (npv == NULL)
				panic("pmap_remove: PA not in pv_tab");
#endif
			ste = (int *)npv->pv_ptste;
			ptpmap = npv->pv_ptpmap;
			pv->pv_next = npv->pv_next;
			free((caddr_t)npv, M_VMPVENT);
			pv = pa_to_pvh(pa);
		}
		/*
		 * If only one mapping left we no longer need to cache inhibit
		 */
		if (pv->pv_pmap &&
		    pv->pv_next == NULL && (pv->pv_flags & PV_CI)) {
#ifdef DEBUG
			if (pmapdebug & PDB_CACHE)
				printf("remove: clearing CI for pa %x\n", pa);
#endif
			pv->pv_flags &= ~PV_CI;
			pmap_changebit(pa, PG_CI, FALSE);
#ifdef DEBUG
			if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
			    (PDB_CACHE|PDB_PVDUMP))
				pmap_pvdump(pa);
#endif
		}

		/*
		 * If this was a PT page we must also remove the
		 * mapping from the associated segment table.
		 */
		if (ste) {
#ifdef DEBUG
			remove_stats.ptinvalid++;
			if (pmapdebug & (PDB_REMOVE|PDB_PTPAGE)) {
				printf("remove: ste was %x@%x pte was %x@%x\n",
				       *ste, ste,
				       *(int *)&opte, pmap_pte(pmap, va));
			}
#endif
			*ste = SG_NV;
			/*
			 * If it was a user PT page, we decrement the
			 * reference count on the segment table as well,
			 * freeing it if it is now empty.
			 */
			if (ptpmap != kernel_pmap) {
#ifdef DEBUG
				if (pmapdebug & (PDB_REMOVE|PDB_SEGTAB))
					printf("remove: stab %x, refcnt %d\n",
					       ptpmap->pm_stab,
					       ptpmap->pm_sref - 1);
				if ((pmapdebug & PDB_PARANOIA) &&
				    ptpmap->pm_stab != (st_entry_t *)trunc_page(ste))
					panic("remove: bogus ste");
#endif
				if (--(ptpmap->pm_sref) == 0) {
#ifdef DEBUG
					if (pmapdebug&(PDB_REMOVE|PDB_SEGTAB))
						printf("remove: free stab %x\n",
						       ptpmap->pm_stab);
#endif
					kmem_free(kernel_map,
						  (vm_offset_t)ptpmap->pm_stab,
						  HP_STSIZE);
					ptpmap->pm_stab = Segtabzero;
					ptpmap->pm_stchanged = TRUE;
					/*
					 * XXX may have changed segment table
					 * pointer for current process so
					 * update now to reload hardware.
					 */
					if (ptpmap == curproc->p_vmspace->vm_map.pmap)
						PMAP_ACTIVATE(ptpmap,
							(struct pcb *)curproc->p_addr, 1);
				}
			}
			if (ptpmap == kernel_pmap)
				TBIAS();
			else
				TBIAU();
			pv->pv_flags &= ~PV_PTPAGE;
			ptpmap->pm_ptpages--;
		}
		/*
		 * Update saved attributes for managed page
		 */
		pmap_attributes[pa_index(pa)] |= bits;
		splx(s);
	}
#ifdef DEBUG
	if (pmapvacflush & PVF_REMOVE) {
		if (pmapvacflush & PVF_TOTAL)
			DCIA();
		else if (pmap == kernel_pmap)
			DCIS();
		else
			DCIU();
	}
#endif
	if (flushcache) {
		if (pmap == kernel_pmap) {
			DCIS();
#ifdef DEBUG
			remove_stats.sflushes++;
#endif
		} else {
			DCIU();
#ifdef DEBUG
			remove_stats.uflushes++;
#endif
		}
	}
}

/*
 * pmap_page_protect:
 *
 * Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(pa, prot)
	vm_offset_t pa;
	vm_prot_t prot;
{
	register pv_entry_t pv;
	int s;

#ifdef DEBUG
	if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
	    prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))
		printf("pmap_page_protect(%x, %x)\n", pa, prot);
#endif
	if (pa < vm_first_phys || pa >= vm_last_phys)
		return;

	switch (prot) {
	case VM_PROT_ALL:
		break;
	/* copy_on_write */
	case VM_PROT_READ:
	case VM_PROT_READ|VM_PROT_EXECUTE:
		pmap_changebit(pa, PG_RO, TRUE);
		break;
	/* remove_all */
	default:
		pv = pa_to_pvh(pa);
		s = splimp();
		while (pv->pv_pmap != NULL) {
#ifdef DEBUG
			if (!pmap_ste_v(pmap_ste(pv->pv_pmap, pv->pv_va)) ||
			    pmap_pte_pa(pmap_pte(pv->pv_pmap, pv->pv_va)) != pa)
				panic("pmap_page_protect: bad mapping");
#endif
			pmap_remove(pv->pv_pmap, pv->pv_va,
				    pv->pv_va + PAGE_SIZE);
		}
		splx(s);
		break;
	}
}

/*
 * Set the physical protection on the
 * specified range of this map as requested.
 */
void
pmap_protect(pmap, sva, eva, prot)
	register pmap_t pmap;
	vm_offset_t sva, eva;
	vm_prot_t prot;
{
	register pt_entry_t *pte;
	register vm_offset_t va;
	register int ix;
	int hpprot;
	boolean_t firstpage = TRUE;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
		printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
#endif
	if (pmap == NULL)
		return;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}
	if (prot & VM_PROT_WRITE)
		return;

	pte = pmap_pte(pmap, sva);
	hpprot = pte_prot(pmap, prot) == PG_RO ? 1 : 0;
	for (va = sva; va < eva; va += PAGE_SIZE) {
		/*
		 * Page table page is not allocated.
		 * Skip it, we don't want to force allocation
		 * of unnecessary PTE pages just to set the protection.
		 */
		if (!pmap_ste_v(pmap_ste(pmap, va))) {
			/* XXX: avoid address wrap around */
			if (va >= hp300_trunc_seg((vm_offset_t)-1))
				break;
			va = hp300_round_seg(va + PAGE_SIZE) - PAGE_SIZE;
			pte = pmap_pte(pmap, va);
			pte += hppagesperpage;
			continue;
		}
		/*
		 * Page not valid.  Again, skip it.
		 * Should we do this?  Or set protection anyway?
		 */
		if (!pmap_pte_v(pte)) {
			pte += hppagesperpage;
			continue;
		}
		/*
		 * Flush VAC to ensure we get correct state of HW bits
		 * so we don't clobber them.
		 */
		if (firstpage && pmap_aliasmask) {
			firstpage = FALSE;
			DCIS();
		}
		ix = 0;
		do {
			/* clear VAC here if PG_RO? */
			pmap_pte_set_prot(pte++, hpprot);
			TBIS(va + ix * HP_PAGE_SIZE);
		} while (++ix != hppagesperpage);
	}
#ifdef DEBUG
	if (hpprot && (pmapvacflush & PVF_PROTECT)) {
		if (pmapvacflush & PVF_TOTAL)
			DCIA();
		else if (pmap == kernel_pmap)
			DCIS();
		else
			DCIU();
	}
#endif
}

/*
 * Insert the given physical page (p) at
 * the specified virtual address (v) in the
 * target physical map with the protection requested.
 *
 * If specified, the page will be wired down, meaning
 * that the related pte can not be reclaimed.
 *
 * NB:  This is the only routine which MAY NOT lazy-evaluate
 * or lose information.  That is, this routine must actually
 * insert this page into the given map NOW.
 */
void
pmap_enter(pmap, va, pa, prot, wired)
	register pmap_t pmap;
	vm_offset_t va;
	register vm_offset_t pa;
	vm_prot_t prot;
	boolean_t wired;
{
	register pt_entry_t *pte;
	register int npte, ix;
	vm_offset_t opa;
	boolean_t cacheable = TRUE;
	boolean_t checkpv = TRUE;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
		printf("pmap_enter(%x, %x, %x, %x, %x)\n",
		       pmap, va, pa, prot, wired);
#endif
	if (pmap == NULL)
		return;

#ifdef DEBUG
	if (pmap == kernel_pmap)
		enter_stats.kernel++;
	else
		enter_stats.user++;
#endif
	/*
	 * For user mapping, allocate kernel VM resources if necessary.
	 */
	if (pmap->pm_ptab == NULL)
		pmap->pm_ptab = (pt_entry_t *)
			kmem_alloc_wait(pt_map, HP_MAX_PTSIZE);

	/*
	 * Segment table entry not valid, we need a new PT page
	 */
	if (!pmap_ste_v(pmap_ste(pmap, va)))
		pmap_enter_ptpage(pmap, va);

	pte = pmap_pte(pmap, va);
	opa = pmap_pte_pa(pte);
#ifdef DEBUG
	if (pmapdebug & PDB_ENTER)
		printf("enter: pte %x, *pte %x\n", pte, *(int *)pte);
#endif

	/*
	 * Mapping has not changed, must be protection or wiring change.
	 */
	if (opa == pa) {
#ifdef DEBUG
		enter_stats.pwchange++;
#endif
		/*
		 * Wiring change, just update stats.
		 * We don't worry about wiring PT pages as they remain
		 * resident as long as there are valid mappings in them.
		 * Hence, if a user page is wired, the PT page will be also.
		 */
		if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
#ifdef DEBUG
			if (pmapdebug & PDB_ENTER)
				printf("enter: wiring change -> %x\n", wired);
#endif
			if (wired)
				pmap->pm_stats.wired_count++;
			else
				pmap->pm_stats.wired_count--;
#ifdef DEBUG
			enter_stats.wchange++;
#endif
		}
		/*
		 * Retain cache inhibition status
		 */
		checkpv = FALSE;
		if (pmap_pte_ci(pte))
			cacheable = FALSE;
		goto validate;
	}

	/*
	 * Mapping has changed, invalidate old range and fall through to
	 * handle validating new mapping.
	 */
	if (opa) {
#ifdef DEBUG
		if (pmapdebug & PDB_ENTER)
			printf("enter: removing old mapping %x\n", va);
#endif
		pmap_remove(pmap, va, va + PAGE_SIZE);
#ifdef DEBUG
		enter_stats.mchange++;
#endif
	}

	/*
	 * If this is a new user mapping, increment the wiring count
	 * on this PT page.  PT pages are wired down as long as there
	 * is a valid mapping in the page.
	 */
	if (pmap != kernel_pmap)
		vm_map_pageable(pt_map, trunc_page(pte),
				round_page(pte+1), FALSE);

	/*
	 * Enter on the PV list if part of our managed memory
	 * Note that we raise IPL while manipulating pv_table
	 * since pmap_enter can be called at interrupt time.
	 */
	if (pa >= vm_first_phys && pa < vm_last_phys) {
		register pv_entry_t pv, npv;
		int s;

#ifdef DEBUG
		enter_stats.managed++;
#endif
		pv = pa_to_pvh(pa);
		s = splimp();
#ifdef DEBUG
		if (pmapdebug & PDB_ENTER)
			printf("enter: pv at %x: %x/%x/%x\n",
			       pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
#endif
		/*
		 * No entries yet, use header as the first entry
		 */
		if (pv->pv_pmap == NULL) {
#ifdef DEBUG
			enter_stats.firstpv++;
#endif
			pv->pv_va = va;
			pv->pv_pmap = pmap;
			pv->pv_next = NULL;
			pv->pv_ptste = NULL;
			pv->pv_ptpmap = NULL;
			pv->pv_flags = 0;
		}
		/*
		 * There is at least one other VA mapping this page.
		 * Place this entry after the header.
		 */
		else {
#ifdef DEBUG
			for (npv = pv; npv; npv = npv->pv_next)
				if (pmap == npv->pv_pmap && va == npv->pv_va)
					panic("pmap_enter: already in pv_tab");
#endif
			npv = (pv_entry_t)
				malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
			npv->pv_va = va;
			npv->pv_pmap = pmap;
			npv->pv_next = pv->pv_next;
			npv->pv_ptste = NULL;
			npv->pv_ptpmap = NULL;
			pv->pv_next = npv;
#ifdef DEBUG
			if (!npv->pv_next)
				enter_stats.secondpv++;
#endif
			/*
			 * Since there is another logical mapping for the
			 * same page we may need to cache-inhibit the
			 * descriptors on those CPUs with external VACs.
			 * We don't need to CI if:
			 *
			 * - No two mappings belong to the same user pmaps.
			 *   Since the cache is flushed on context switches
			 *   there is no problem between user processes.
			 *
			 * - Mappings within a single pmap are a certain
			 *   magic distance apart.  VAs at these appropriate
			 *   boundaries map to the same cache entries or
			 *   otherwise don't conflict.
			 *
			 * To keep it simple, we only check for these special
			 * cases if there are only two mappings, otherwise we
			 * punt and always CI.
			 *
			 * Note that there are no aliasing problems with the
			 * on-chip data-cache when the WA bit is set.
			 */
			if (pmap_aliasmask) {
				if (pv->pv_flags & PV_CI) {
#ifdef DEBUG
					if (pmapdebug & PDB_CACHE)
						printf("enter: pa %x already CI'ed\n",
						       pa);
#endif
					checkpv = cacheable = FALSE;
				} else if (npv->pv_next ||
					   ((pmap == pv->pv_pmap ||
					     pmap == kernel_pmap ||
					     pv->pv_pmap == kernel_pmap) &&
					    ((pv->pv_va & pmap_aliasmask) !=
					     (va & pmap_aliasmask)))) {
#ifdef DEBUG
					if (pmapdebug & PDB_CACHE)
						printf("enter: pa %x CI'ing all\n",
						       pa);
#endif
					cacheable = FALSE;
					pv->pv_flags |= PV_CI;
#ifdef DEBUG
					enter_stats.ci++;
#endif
				}
			}
		}
		splx(s);
	}
	/*
	 * Assumption: if it is not part of our managed memory
	 * then it must be device memory which may be volatile.
	 */
	else if (pmap_initialized) {
		checkpv = cacheable = FALSE;
#ifdef DEBUG
		enter_stats.unmanaged++;
#endif
	}

	/*
	 * Increment counters
	 */
	pmap->pm_stats.resident_count++;
	if (wired)
		pmap->pm_stats.wired_count++;

validate:
	/*
	 * Flush VAC to ensure we get correct state of HW bits
	 * so we don't clobber them.
	 */
	if (pmap_aliasmask)
		DCIS();
	/*
	 * Now validate mapping with desired protection/wiring.
	 * Assume uniform modified and referenced status for all
	 * HP pages in a MACH page.
	 */
	npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
	npte |= (*(int *)pte & (PG_M|PG_U));
	if (wired)
		npte |= PG_W;
	if (!checkpv && !cacheable)
		npte |= PG_CI;
#ifdef DEBUG
	if (pmapdebug & PDB_ENTER)
		printf("enter: new pte value %x\n", npte);
#endif
	ix = 0;
	do {
		*(int *)pte++ = npte;
		TBIS(va);
		npte += HP_PAGE_SIZE;
		va += HP_PAGE_SIZE;
	} while (++ix != hppagesperpage);
	/*
	 * The following is executed if we are entering a second
	 * (or greater) mapping for a physical page and the mappings
	 * may create an aliasing problem.  In this case we must
	 * cache inhibit the descriptors involved and flush any
	 * external VAC.
	 */
	if (checkpv && !cacheable) {
		pmap_changebit(pa, PG_CI, TRUE);
		DCIA();
#ifdef DEBUG
		enter_stats.flushes++;
#endif
#ifdef DEBUG
		if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
		    (PDB_CACHE|PDB_PVDUMP))
			pmap_pvdump(pa);
#endif
	}
#ifdef DEBUG
	else if (pmapvacflush & PVF_ENTER) {
		if (pmapvacflush & PVF_TOTAL)
			DCIA();
		else if (pmap == kernel_pmap)
			DCIS();
		else
			DCIU();
	}
	if ((pmapdebug & PDB_WIRING) && pmap != kernel_pmap) {
		va -= PAGE_SIZE;
		pmap_check_wiring("enter", trunc_page(pmap_pte(pmap, va)));
	}
#endif
}

/*
 * Routine:	pmap_change_wiring
 * Function:	Change the wiring attribute for a map/virtual-address
 *		pair.
 * In/out conditions:
 *		The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap, va, wired)
	register pmap_t pmap;
	vm_offset_t va;
	boolean_t wired;
{
	register pt_entry_t *pte;
	register int ix;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_change_wiring(%x, %x, %x)\n", pmap, va, wired);
#endif
	if (pmap == NULL)
		return;

	pte = pmap_pte(pmap, va);
#ifdef DEBUG
	/*
	 * Page table page is not allocated.
	 * Should this ever happen?  Ignore it for now,
	 * we don't want to force allocation of unnecessary PTE pages.
	 */
	if (!pmap_ste_v(pmap_ste(pmap, va))) {
		if (pmapdebug & PDB_PARANOIA)
			printf("pmap_change_wiring: invalid STE for %x\n", va);
		return;
	}
	/*
	 * Page not valid.  Should this ever happen?
	 * Just continue and change wiring anyway.
	 */
	if (!pmap_pte_v(pte)) {
		if (pmapdebug & PDB_PARANOIA)
			printf("pmap_change_wiring: invalid PTE for %x\n", va);
	}
#endif
	if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
		if (wired)
			pmap->pm_stats.wired_count++;
		else
			pmap->pm_stats.wired_count--;
	}
	/*
	 * Wiring is not a hardware characteristic so there is no need
	 * to invalidate TLB.
	 */
	ix = 0;
	do {
		pmap_pte_set_w(pte++, wired);
	} while (++ix != hppagesperpage);
}

/*
 * Routine:	pmap_extract
 * Function:
 *	Extract the physical page address associated
 *	with the given map/virtual_address pair.
 */

vm_offset_t
pmap_extract(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{
	register vm_offset_t pa;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_extract(%x, %x) -> ", pmap, va);
#endif
	pa = 0;
	if (pmap && pmap_ste_v(pmap_ste(pmap, va)))
		pa = *(int *)pmap_pte(pmap, va);
	if (pa)
		pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("%x\n", pa);
#endif
	return(pa);
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
	pmap_t dst_pmap;
	pmap_t src_pmap;
	vm_offset_t dst_addr;
	vm_size_t len;
	vm_offset_t src_addr;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy(%x, %x, %x, %x, %x)\n",
		       dst_pmap, src_pmap, dst_addr, len, src_addr);
#endif
}

/*
 * Require that all active physical maps contain no
 * incorrect entries NOW.  [This update includes
 * forcing updates of any address map caching.]
 *
 * Generally used to insure that a thread about
 * to run will see a semantically correct world.
 */
void pmap_update()
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_update()\n");
#endif
	TBIA();
}

/*
 * Routine:	pmap_collect
 * Function:
 *	Garbage collects the physical map system for
 *	pages which are no longer used.
 *	Success need not be guaranteed -- that is, there
 *	may well be pages which are not referenced, but
 *	others may be collected.
 * Usage:
 *	Called by the pageout daemon when pages are scarce.
 */
void
pmap_collect(pmap)
	pmap_t pmap;
{
	register vm_offset_t pa;
	register pv_entry_t pv;
	register int *pte;
	vm_offset_t kpa;
	int s;

#ifdef DEBUG
	int *ste;
	int opmapdebug;
#endif
	if (pmap != kernel_pmap)
		return;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_collect(%x)\n", pmap);
	kpt_stats.collectscans++;
#endif
	s = splimp();
	for (pa = vm_first_phys; pa < vm_last_phys; pa += PAGE_SIZE) {
		register struct kpt_page *kpt, **pkpt;

		/*
		 * Locate physical pages which are being used as kernel
		 * page table pages.
		 */
		pv = pa_to_pvh(pa);
		if (pv->pv_pmap != kernel_pmap || !(pv->pv_flags & PV_PTPAGE))
			continue;
		do {
			if (pv->pv_ptste && pv->pv_ptpmap == kernel_pmap)
				break;
		} while (pv = pv->pv_next);
		if (pv == NULL)
			continue;
#ifdef DEBUG
		if (pv->pv_va < (vm_offset_t)Sysmap ||
		    pv->pv_va >= (vm_offset_t)Sysmap + HP_MAX_PTSIZE)
			printf("collect: kernel PT VA out of range\n");
		else
			goto ok;
		pmap_pvdump(pa);
		continue;
ok:
#endif
		pte = (int *)(pv->pv_va + HP_PAGE_SIZE);
		while (--pte >= (int *)pv->pv_va && *pte == PG_NV)
			;
		if (pte >= (int *)pv->pv_va)
			continue;

#ifdef DEBUG
		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
			printf("collect: freeing KPT page at %x (ste %x@%x)\n",
			       pv->pv_va, *(int *)pv->pv_ptste, pv->pv_ptste);
			opmapdebug = pmapdebug;
			pmapdebug |= PDB_PTPAGE;
		}

		ste = (int *)pv->pv_ptste;
#endif
		/*
		 * If all entries were invalid we can remove the page.
		 * We call pmap_remove to take care of invalidating ST
		 * and Sysptmap entries.
		 */
		kpa = pmap_extract(pmap, pv->pv_va);
		pmap_remove(pmap, pv->pv_va, pv->pv_va + HP_PAGE_SIZE);
		/*
		 * Use the physical address to locate the original
		 * (kmem_alloc assigned) address for the page and put
		 * that page back on the free list.
		 */
		for (pkpt = &kpt_used_list, kpt = *pkpt;
		     kpt != (struct kpt_page *)0;
		     pkpt = &kpt->kpt_next, kpt = *pkpt)
			if (kpt->kpt_pa == kpa)
				break;
#ifdef DEBUG
		if (kpt == (struct kpt_page *)0)
			panic("pmap_collect: lost a KPT page");
		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
			printf("collect: %x (%x) to free list\n",
			       kpt->kpt_va, kpa);
#endif
		*pkpt = kpt->kpt_next;
		kpt->kpt_next = kpt_free_list;
		kpt_free_list = kpt;
#ifdef DEBUG
		kpt_stats.kptinuse--;
		kpt_stats.collectpages++;
		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
			pmapdebug = opmapdebug;

		if (*ste)
			printf("collect: kernel STE at %x still valid (%x)\n",
			       ste, *ste);
		ste = (int *)&Sysptmap[(st_entry_t *)ste-pmap_ste(kernel_pmap, 0)];
		if (*ste)
			printf("collect: kernel PTmap at %x still valid (%x)\n",
			       ste, *ste);
#endif
	}
	splx(s);
}

void
pmap_activate(pmap, pcbp)
	register pmap_t pmap;
	struct pcb *pcbp;
{
#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_SEGTAB))
		printf("pmap_activate(%x, %x)\n", pmap, pcbp);
#endif
	PMAP_ACTIVATE(pmap, pcbp, pmap == curproc->p_vmspace->vm_map.pmap);
}

/*
 * pmap_zero_page zeros the specified (machine independent)
 * page by mapping the page into virtual memory and using
 * bzero to clear its contents, one machine dependent page
 * at a time.
 */
void
pmap_zero_page(phys)
	register vm_offset_t phys;
{
	register int ix;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_zero_page(%x)\n", phys);
#endif
	phys >>= PG_SHIFT;
	ix = 0;
	do {
		clearseg(phys++);
	} while (++ix != hppagesperpage);
}

/*
 * pmap_copy_page copies the specified (machine independent)
 * page by mapping the page into virtual memory and using
 * bcopy to copy the page, one machine dependent page at a
 * time.
 */
void
pmap_copy_page(src, dst)
	register vm_offset_t src, dst;
{
	register int ix;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy_page(%x, %x)\n", src, dst);
#endif
	src >>= PG_SHIFT;
	dst >>= PG_SHIFT;
	ix = 0;
	do {
		physcopyseg(src++, dst++);
	} while (++ix != hppagesperpage);
}


/*
 * Routine:	pmap_pageable
 * Function:
 *	Make the specified pages (by pmap, offset)
 *	pageable (or not) as requested.
 *
 *	A page which is not pageable may not take
 *	a fault; therefore, its page table entry
 *	must remain valid for the duration.
 *
 *	This routine is merely advisory; pmap_enter
 *	will specify that these pages are to be wired
 *	down (or not) as appropriate.
 */
void
pmap_pageable(pmap, sva, eva, pageable)
	pmap_t pmap;
	vm_offset_t sva, eva;
	boolean_t pageable;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_pageable(%x, %x, %x, %x)\n",
		       pmap, sva, eva, pageable);
#endif
	/*
	 * If we are making a PT page pageable then all valid
	 * mappings must be gone from that page.  Hence it should
	 * be all zeros and there is no need to clean it.
	 * Assumptions:
	 *	- we are called with only one page at a time
	 *	- PT pages have only one pv_table entry
	 */
	if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
		register pv_entry_t pv;
		register vm_offset_t pa;

#ifdef DEBUG
		if ((pmapdebug & (PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE)
			printf("pmap_pageable(%x, %x, %x, %x)\n",
			       pmap, sva, eva, pageable);
#endif
		if (!pmap_ste_v(pmap_ste(pmap, sva)))
			return;
		pa = pmap_pte_pa(pmap_pte(pmap, sva));
		if (pa < vm_first_phys || pa >= vm_last_phys)
			return;
		pv = pa_to_pvh(pa);
		if (pv->pv_ptste == NULL)
			return;
#ifdef DEBUG
		if (pv->pv_va != sva || pv->pv_next) {
			printf("pmap_pageable: bad PT page va %x next %x\n",
			       pv->pv_va, pv->pv_next);
			return;
		}
#endif
		/*
		 * Mark it unmodified to avoid pageout
		 */
		pmap_changebit(pa, PG_M, FALSE);
#ifdef DEBUG
		if (pmapdebug & PDB_PTPAGE)
			printf("pmap_pageable: PT page %x(%x) unmodified\n",
			       sva, *(int *)pmap_pte(pmap, sva));
		if (pmapdebug & PDB_WIRING)
			pmap_check_wiring("pageable", sva);
#endif
	}
}

/*
 * Clear the modify bits on the specified physical page.
 */

void
pmap_clear_modify(pa)
	vm_offset_t pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_modify(%x)\n", pa);
#endif
	pmap_changebit(pa, PG_M, FALSE);
}

/*
 * pmap_clear_reference:
 *
 * Clear the reference bit on the specified physical page.
 */

void pmap_clear_reference(pa)
	vm_offset_t pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_reference(%x)\n", pa);
#endif
	pmap_changebit(pa, PG_U, FALSE);
}

/*
 * pmap_is_referenced:
 *
 * Return whether or not the specified physical page is referenced
 * by any physical maps.
 */

boolean_t
pmap_is_referenced(pa)
	vm_offset_t pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW) {
		boolean_t rv = pmap_testbit(pa, PG_U);
		printf("pmap_is_referenced(%x) -> %c\n", pa, "FT"[rv]);
		return(rv);
	}
#endif
	return(pmap_testbit(pa, PG_U));
}

/*
 * pmap_is_modified:
 *
 * Return whether or not the specified physical page is modified
 * by any physical maps.
 */

boolean_t
pmap_is_modified(pa)
	vm_offset_t pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW) {
		boolean_t rv = pmap_testbit(pa, PG_M);
		printf("pmap_is_modified(%x) -> %c\n", pa, "FT"[rv]);
		return(rv);
	}
#endif
	return(pmap_testbit(pa, PG_M));
}

vm_offset_t
pmap_phys_address(ppn)
	int ppn;
{
	return(hp300_ptob(ppn));
}

/*
 * Miscellaneous support routines follow
 */

/* static */
hp300_protection_init()
{
	register int *kp, prot;

	kp = protection_codes;
	for (prot = 0; prot < 8; prot++) {
		switch (prot) {
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
			*kp++ = 0;
			break;
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
			*kp++ = PG_RO;
			break;
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
			*kp++ = PG_RW;
			break;
		}
	}
}

/* static */
boolean_t
pmap_testbit(pa, bit)
	register vm_offset_t pa;
	int bit;
{
	register pv_entry_t pv;
	register int *pte, ix;
	int s;

	if (pa < vm_first_phys || pa >= vm_last_phys)
		return(FALSE);

	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * Check saved info first
	 */
	if (pmap_attributes[pa_index(pa)] & bit) {
		splx(s);
		return(TRUE);
	}
	/*
	 * Flush VAC to get correct state of any hardware maintained bits.
	 */
	if (pmap_aliasmask && (bit & (PG_U|PG_M)))
		DCIS();
	/*
	 * Not found, check current mappings returning
	 * immediately if found.
	 */
	if (pv->pv_pmap != NULL) {
		for (; pv; pv = pv->pv_next) {
			pte = (int *) pmap_pte(pv->pv_pmap, pv->pv_va);
			ix = 0;
			do {
				if (*pte++ & bit) {
					splx(s);
					return(TRUE);
				}
			} while (++ix != hppagesperpage);
		}
	}
	splx(s);
	return(FALSE);
}

/* static */
pmap_changebit(pa, bit, setem)
	register vm_offset_t pa;
	int bit;
	boolean_t setem;
{
	register pv_entry_t pv;
	register int *pte, npte, ix;
	vm_offset_t va;
	int s;
	boolean_t firstpage = TRUE;

#ifdef DEBUG
	if (pmapdebug & PDB_BITS)
		printf("pmap_changebit(%x, %x, %s)\n",
		       pa, bit, setem ? "set" : "clear");
#endif
	if (pa < vm_first_phys || pa >= vm_last_phys)
		return;

	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * Clear saved attributes (modify, reference)
	 */
	if (!setem)
		pmap_attributes[pa_index(pa)] &= ~bit;
	/*
	 * Loop over all current mappings setting/clearing as appropriate
	 * If setting RO do we need to clear the VAC?
	 */
	if (pv->pv_pmap != NULL) {
#ifdef DEBUG
		int toflush = 0;
#endif
		for (; pv; pv = pv->pv_next) {
#ifdef DEBUG
			toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1;
#endif
			va = pv->pv_va;

			/*
			 * XXX don't write protect pager mappings
			 */
			if (bit == PG_RO) {
				extern vm_offset_t pager_sva, pager_eva;

				if (va >= pager_sva && va < pager_eva)
					continue;
			}

			pte = (int *) pmap_pte(pv->pv_pmap, va);
			/*
			 * Flush VAC to ensure we get correct state of HW bits
			 * so we don't clobber them.
			 */
			if (firstpage && pmap_aliasmask) {
				firstpage = FALSE;
				DCIS();
			}
			ix = 0;
			do {
				if (setem)
					npte = *pte | bit;
				else
					npte = *pte & ~bit;
				if (*pte != npte) {
					*pte = npte;
					TBIS(va);
				}
				va += HP_PAGE_SIZE;
				pte++;
			} while (++ix != hppagesperpage);
		}
#ifdef DEBUG
		if (setem && bit == PG_RO && (pmapvacflush & PVF_PROTECT)) {
			if ((pmapvacflush & PVF_TOTAL) || toflush == 3)
				DCIA();
			else if (toflush == 2)
				DCIS();
			else
				DCIU();
		}
#endif
	}
	splx(s);
}

/* static */
void
pmap_enter_ptpage(pmap, va)
	register pmap_t pmap;
	register vm_offset_t va;
{
	register vm_offset_t ptpa;
	register pv_entry_t pv;
	st_entry_t *ste;
	int s;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE))
		printf("pmap_enter_ptpage: pmap %x, va %x\n", pmap, va);
	enter_stats.ptpneeded++;
#endif
	/*
	 * Allocate a segment table if necessary.  Note that it is allocated
	 * from kernel_map and not pt_map.  This keeps user page tables
	 * aligned on segment boundaries in the kernel address space.
	 * The segment table is wired down.  It will be freed whenever the
	 * reference count drops to zero.
	 */
	if (pmap->pm_stab == Segtabzero) {
		pmap->pm_stab = (st_entry_t *)
			kmem_alloc(kernel_map, HP_STSIZE);
		pmap->pm_stchanged = TRUE;
		/*
		 * XXX may have changed segment table pointer for current
		 * process so update now to reload hardware.
		 */
		if (pmap == curproc->p_vmspace->vm_map.pmap)
			PMAP_ACTIVATE(pmap, (struct pcb *)curproc->p_addr, 1);
#ifdef DEBUG
		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
			printf("enter: pmap %x stab %x\n",
			       pmap, pmap->pm_stab);
#endif
	}

	ste = pmap_ste(pmap, va);
	va = trunc_page((vm_offset_t)pmap_pte(pmap, va));

	/*
	 * In the kernel we allocate a page from the kernel PT page
	 * free list and map it into the kernel page table map (via
	 * pmap_enter).
	 */
	if (pmap == kernel_pmap) {
		register struct kpt_page *kpt;

		s = splimp();
		if ((kpt = kpt_free_list) == (struct kpt_page *)0) {
			/*
			 * No PT pages available.
			 * Try once to free up unused ones.
			 */
#ifdef DEBUG
			if (pmapdebug & PDB_COLLECT)
				printf("enter: no KPT pages, collecting...\n");
#endif
			pmap_collect(kernel_pmap);
			if ((kpt = kpt_free_list) == (struct kpt_page *)0)
				panic("pmap_enter_ptpage: can't get KPT page");
		}
#ifdef DEBUG
		if (++kpt_stats.kptinuse > kpt_stats.kptmaxuse)
			kpt_stats.kptmaxuse = kpt_stats.kptinuse;
#endif
		kpt_free_list = kpt->kpt_next;
		kpt->kpt_next = kpt_used_list;
		kpt_used_list = kpt;
		ptpa = kpt->kpt_pa;
		bzero(kpt->kpt_va, HP_PAGE_SIZE);
		pmap_enter(pmap, va, ptpa, VM_PROT_DEFAULT, TRUE);
#ifdef DEBUG
		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
			printf("enter: add &Sysptmap[%d]: %x (KPT page %x)\n",
			       ste - pmap_ste(pmap, 0),
			       *(int *)&Sysptmap[ste - pmap_ste(pmap, 0)],
			       kpt->kpt_va);
#endif
		splx(s);
	}
	/*
	 * For user processes we just simulate a fault on that location
	 * letting the VM system allocate a zero-filled page.
	 */
	else {
#ifdef DEBUG
		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
			printf("enter: about to fault UPT pg at %x\n", va);
#endif
		if (vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE)
		    != KERN_SUCCESS)
			panic("pmap_enter: vm_fault failed");
		ptpa = pmap_extract(kernel_pmap, va);
#ifdef DEBUG
		PHYS_TO_VM_PAGE(ptpa)->ptpage = TRUE;
#endif
	}

	/*
	 * Locate the PV entry in the kernel for this PT page and
	 * record the STE address.  This is so that we can invalidate
	 * the STE when we remove the mapping for the page.
	 */
	pv = pa_to_pvh(ptpa);
	s = splimp();
	if (pv) {
		pv->pv_flags |= PV_PTPAGE;
		do {
			if (pv->pv_pmap == kernel_pmap && pv->pv_va == va)
				break;
		} while (pv = pv->pv_next);
	}
#ifdef DEBUG
	if (pv == NULL)
		panic("pmap_enter_ptpage: PT page not entered");
#endif
	pv->pv_ptste = ste;
	pv->pv_ptpmap = pmap;
#ifdef DEBUG
	if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
		printf("enter: new PT page at PA %x, ste at %x\n", ptpa, ste);
#endif

	/*
	 * Map the new PT page into the segment table.
	 * Also increment the reference count on the segment table if this
	 * was a user page table page.  Note that we don't use vm_map_pageable
	 * to keep the count like we do for PT pages, this is mostly because
	 * it would be difficult to identify ST pages in pmap_pageable to
	 * release them.  We also avoid the overhead of vm_map_pageable.
	 */
	*(int *)ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
	if (pmap != kernel_pmap) {
		pmap->pm_sref++;
#ifdef DEBUG
		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
			printf("enter: stab %x refcnt %d\n",
			       pmap->pm_stab, pmap->pm_sref);
#endif
	}
	/*
	 * Flush stale TLB info.
	 */
	if (pmap == kernel_pmap)
		TBIAS();
	else
		TBIAU();
	pmap->pm_ptpages++;
	splx(s);
}

#ifdef DEBUG
pmap_pvdump(pa)
	vm_offset_t pa;
{
	register pv_entry_t pv;

	printf("pa %x", pa);
	for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next)
		printf(" -> pmap %x, va %x, ptste %x, ptpmap %x, flags %x",
		       pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap,
		       pv->pv_flags);
	printf("\n");
}

pmap_check_wiring(str, va)
	char *str;
	vm_offset_t va;
{
	vm_map_entry_t entry;
	register int count, *pte;

	va = trunc_page(va);
	if (!pmap_ste_v(pmap_ste(kernel_pmap, va)) ||
	    !pmap_pte_v(pmap_pte(kernel_pmap, va)))
		return;

	if (!vm_map_lookup_entry(pt_map, va, &entry)) {
		printf("wired_check: entry for %x not found\n", va);
		return;
	}
	count = 0;
	for (pte = (int *)va; pte < (int *)(va+PAGE_SIZE); pte++)
		if (*pte)
			count++;
	if (entry->wired_count != count)
		printf("*%s*: %x: w%d/a%d\n",
		       str, va, entry->wired_count, count);
}
#endif