/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)pmap.c	7.5 (Berkeley) 05/10/91
 */

/*
 * HP9000/300 series physical map management code.
 * For 68020/68030 machines with HP, 68851, or 68030 MMUs
 *	(models 320,350,318,319,330,340,360,370,345,375)
 * Don't even pay lip service to multiprocessor support.
 *
 * XXX will only work for PAGE_SIZE == NBPG (hppagesperpage == 1)
 * right now because of the assumed one-to-one relationship of PT
 * pages to STEs.
 */

/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidate or reduced protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */

#include "param.h"
#include "proc.h"
#include "malloc.h"
#include "user.h"

#include "pte.h"

#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "vm/vm_page.h"
#include "vm/vm_statistics.h"

#include "../include/cpu.h"

/*
 * Allocate various and sundry SYSMAPs used in the days of old VM
 * and not yet converted.  XXX.
 */
#define	BSDVM_COMPAT	1

#ifdef DEBUG
struct {
	int collectscans;
	int collectpages;
	int kpttotal;
	int kptinuse;
	int kptmaxuse;
} kpt_stats;
struct {
	int kernel;	/* entering kernel mapping */
	int user;	/* entering user mapping */
	int ptpneeded;	/* needed to allocate a PT page */
	int pwchange;	/* no mapping change, just wiring or protection */
	int wchange;	/* no mapping change, just wiring */
	int mchange;	/* was mapped but mapping to different page */
	int managed;	/* a managed page */
	int firstpv;	/* first mapping for this PA */
	int secondpv;	/* second mapping for this PA */
	int ci;		/* cache inhibited */
	int unmanaged;	/* not a managed page */
	int flushes;	/* cache flushes */
} enter_stats;
struct {
	int calls;
	int removes;
	int pvfirst;
	int pvsearch;
	int ptinvalid;
	int uflushes;
	int sflushes;
} remove_stats;

int debugmap = 0;
int pmapdebug = 0x2000;
#define PDB_FOLLOW	0x0001
#define PDB_INIT	0x0002
#define PDB_ENTER	0x0004
#define PDB_REMOVE	0x0008
#define PDB_CREATE	0x0010
#define PDB_PTPAGE	0x0020
#define PDB_CACHE	0x0040
#define PDB_BITS	0x0080
#define PDB_COLLECT	0x0100
#define PDB_PROTECT	0x0200
#define PDB_SEGTAB	0x0400
#define PDB_PARANOIA	0x2000
#define PDB_WIRING	0x4000
#define PDB_PVDUMP	0x8000

int pmapvacflush = 0;
#define	PVF_ENTER	0x01
#define	PVF_REMOVE	0x02
#define	PVF_PROTECT	0x04
#define	PVF_TOTAL	0x80

extern vm_offset_t pager_sva, pager_eva;
#endif

/*
 * Get STEs and PTEs for user/kernel address space
 */
#define	pmap_ste(m, v)	(&((m)->pm_stab[(vm_offset_t)(v) >> SG_ISHIFT]))
#define	pmap_pte(m, v)	(&((m)->pm_ptab[(vm_offset_t)(v) >> PG_SHIFT]))

#define	pmap_pte_pa(pte)	(*(int *)(pte) & PG_FRAME)

#define	pmap_ste_v(pte)		((pte)->sg_v)
#define	pmap_pte_w(pte)		((pte)->pg_w)
#define	pmap_pte_ci(pte)	((pte)->pg_ci)
#define	pmap_pte_m(pte)		((pte)->pg_m)
#define	pmap_pte_u(pte)		((pte)->pg_u)
#define	pmap_pte_v(pte)		((pte)->pg_v)
#define	pmap_pte_set_w(pte, v)		((pte)->pg_w = (v))
#define	pmap_pte_set_prot(pte, v)	((pte)->pg_prot = (v))

/*
 * Given a map and a machine independent protection code,
 * convert to a vax protection code.
 */
#define pte_prot(m, p)	(protection_codes[p])
int	protection_codes[8];

/*
 * Kernel page table page management.
 */
struct kpt_page {
	struct kpt_page *kpt_next;	/* link on either used or free list */
	vm_offset_t	kpt_va;		/* always valid kernel VA */
	vm_offset_t	kpt_pa;		/* PA of this page (for speed) */
};
struct kpt_page *kpt_free_list, *kpt_used_list;
struct kpt_page *kpt_pages;

/*
 * Kernel segment/page table and page table map.
 * The page table map gives us a level of indirection we need to dynamically
 * expand the page table.  It is essentially a copy of the segment table
 * with PTEs instead of STEs.  All are initialized in locore at boot time.
 * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs.
 * Segtabzero is an empty segment table which all processes share til they
 * reference something.
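 * (A process gets a private segment table from pmap_enter_ptpage the first
 * time pmap_enter has to allocate a PT page for it.)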
 */
st_entry_t	*Sysseg;
pt_entry_t	*Sysmap, *Sysptmap;
st_entry_t	*Segtabzero;
#if BSDVM_COMPAT
vm_size_t	Sysptsize = VM_KERNEL_PT_PAGES + 4 / NPTEPG;
#else
vm_size_t	Sysptsize = VM_KERNEL_PT_PAGES;
#endif

struct pmap	kernel_pmap_store;
pmap_t		kernel_pmap;
vm_map_t	pt_map;

vm_offset_t	avail_start;	/* PA of first available physical page */
vm_offset_t	avail_end;	/* PA of last available physical page */
vm_size_t	mem_size;	/* memory size in bytes */
vm_offset_t	virtual_avail;	/* VA of first avail page (after kernel bss)*/
vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
vm_offset_t	vm_first_phys;	/* PA of first managed page */
vm_offset_t	vm_last_phys;	/* PA just past last managed page */
int		hppagesperpage;	/* PAGE_SIZE / HP_PAGE_SIZE */
boolean_t	pmap_initialized = FALSE;	/* Has pmap_init completed? */
int		pmap_aliasmask;	/* separation at which VA aliasing ok */
char		*pmap_attributes;	/* reference and modify bits */

boolean_t	pmap_testbit();
void		pmap_enter_ptpage();

#if BSDVM_COMPAT
#include "msgbuf.h"

/*
 * All those kernel PT submaps that BSD is so fond of
 */
struct pte	*CMAP1, *CMAP2, *mmap;
caddr_t		CADDR1, CADDR2, vmmap;
struct pte	*msgbufmap;
struct msgbuf	*msgbufp;
#endif

/*
 * Bootstrap the system enough to run with virtual memory.
 * Map the kernel's code and data, and allocate the system page table.
 *
 * On the HP this is called after mapping has already been enabled
 * and just syncs the pmap module with what has already been done.
 * [We can't call it easily with mapping off since the kernel is not
 * mapped with PA == VA, hence we would have to relocate every address
 * from the linked base (virtual) address 0 to the actual (physical)
 * address of 0xFFxxxxxx.]
 */
void
pmap_bootstrap(firstaddr, loadaddr)
	vm_offset_t firstaddr;
	vm_offset_t loadaddr;
{
#if BSDVM_COMPAT
	vm_offset_t va;
	struct pte *pte;
#endif
	extern vm_offset_t maxmem, physmem;

	avail_start = firstaddr;
	avail_end = maxmem << PGSHIFT;

	/* XXX: allow for msgbuf */
	avail_end -= hp300_round_page(sizeof(struct msgbuf));

	mem_size = physmem << PGSHIFT;
	virtual_avail = VM_MIN_KERNEL_ADDRESS + (firstaddr - loadaddr);
	virtual_end = VM_MAX_KERNEL_ADDRESS;
	hppagesperpage = PAGE_SIZE / HP_PAGE_SIZE;

	/*
	 * Determine VA aliasing distance if any
	 */
	if (ectype == EC_VIRT)
		switch (machineid) {
		case HP_320:
			pmap_aliasmask = 0x3fff;	/* 16k */
			break;
		case HP_350:
			pmap_aliasmask = 0x7fff;	/* 32k */
			break;
		}

	/*
	 * Initialize protection array.
	 */
	hp300_protection_init();

	/*
	 * The kernel's pmap is statically allocated so we don't
	 * have to use pmap_create, which is unlikely to work
	 * correctly at this part of the boot sequence.
	 */
	kernel_pmap = &kernel_pmap_store;

	/*
	 * Kernel page/segment table allocated in locore,
	 * just initialize pointers.
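	 * (Sysseg and Sysmap were set up in locore; the kernel pmap only
	 * needs to record where they are.)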
	 */
	kernel_pmap->pm_stab = Sysseg;
	kernel_pmap->pm_ptab = Sysmap;

	simple_lock_init(&kernel_pmap->pm_lock);
	kernel_pmap->pm_count = 1;

#if BSDVM_COMPAT
	/*
	 * Allocate all the submaps we need
	 */
#define	SYSMAP(c, p, v, n)	\
	v = (c)va; va += ((n)*HP_PAGE_SIZE); p = pte; pte += (n);

	va = virtual_avail;
	pte = pmap_pte(kernel_pmap, va);

	SYSMAP(caddr_t		,CMAP1		,CADDR1		,1	)
	SYSMAP(caddr_t		,CMAP2		,CADDR2		,1	)
	SYSMAP(caddr_t		,mmap		,vmmap		,1	)
	SYSMAP(struct msgbuf *	,msgbufmap	,msgbufp	,1	)
	virtual_avail = va;
#endif
}

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(phys_start, phys_end)
	vm_offset_t	phys_start, phys_end;
{
	vm_offset_t	addr, addr2;
	vm_size_t	npg, s;
	int		rv;
	extern char kstack[];

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
#endif
	/*
	 * Now that kernel map has been allocated, we can mark as
	 * unavailable regions which we have mapped in locore.
	 */
	addr = (vm_offset_t) intiobase;
	(void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
			   &addr, hp300_ptob(IIOMAPSIZE+EIOMAPSIZE), FALSE);
	if (addr != (vm_offset_t)intiobase)
		goto bogons;
	addr = (vm_offset_t) Sysmap;
	vm_object_reference(kernel_object);
	(void) vm_map_find(kernel_map, kernel_object, addr,
			   &addr, HP_MAX_PTSIZE, FALSE);
	/*
	 * If this fails it is probably because the static portion of
	 * the kernel page table isn't big enough and we overran the
	 * page table map.  Need to adjust pmap_size() in hp300_init.c.
	 */
	if (addr != (vm_offset_t)Sysmap)
		goto bogons;

	addr = (vm_offset_t) kstack;
	vm_object_reference(kernel_object);
	(void) vm_map_find(kernel_map, kernel_object, addr,
			   &addr, hp300_ptob(UPAGES), FALSE);
	if (addr != (vm_offset_t)kstack)
bogons:
		panic("pmap_init: bogons in the VM system!\n");

#ifdef DEBUG
	if (pmapdebug & PDB_INIT) {
		printf("pmap_init: Sysseg %x, Sysmap %x, Sysptmap %x\n",
		       Sysseg, Sysmap, Sysptmap);
		printf("  pstart %x, pend %x, vstart %x, vend %x\n",
		       avail_start, avail_end, virtual_avail, virtual_end);
	}
#endif

	/*
	 * Allocate memory for random pmap data structures.  Includes the
	 * initial segment table, pv_head_table and pmap_attributes.
	 */
	npg = atop(phys_end - phys_start);
	s = (vm_size_t) (HP_STSIZE + sizeof(struct pv_entry) * npg + npg);
	s = round_page(s);
	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
	Segtabzero = (st_entry_t *) addr;
	addr += HP_STSIZE;
	pv_table = (pv_entry_t) addr;
	addr += sizeof(struct pv_entry) * npg;
	pmap_attributes = (char *) addr;
#ifdef DEBUG
	if (pmapdebug & PDB_INIT)
		printf("pmap_init: %x bytes (%x pgs): seg %x tbl %x attr %x\n",
		       s, npg, Segtabzero, pv_table, pmap_attributes);
#endif

	/*
	 * Allocate physical memory for kernel PT pages and their management.
	 * We need 1 PT page per possible task plus some slop.
	 */
	npg = min(atop(HP_MAX_KPTSIZE), maxproc+16);
	s = ptoa(npg) + round_page(npg * sizeof(struct kpt_page));

	/*
	 * Verify that space will be allocated in region for which
	 * we already have kernel PT pages.
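	 * (The vm_map_find below is only a probe; it is undone right away
	 * with vm_map_remove once the address has been checked against
	 * Sysmap.)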
	 */
	addr = 0;
	rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
	if (rv != KERN_SUCCESS || addr + s >= (vm_offset_t)Sysmap)
		panic("pmap_init: kernel PT too small");
	vm_map_remove(kernel_map, addr, addr + s);

	/*
	 * Now allocate the space and link the pages together to
	 * form the KPT free list.
	 */
	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
	s = ptoa(npg);
	addr2 = addr + s;
	kpt_pages = &((struct kpt_page *)addr2)[npg];
	kpt_free_list = (struct kpt_page *) 0;
	do {
		addr2 -= HP_PAGE_SIZE;
		(--kpt_pages)->kpt_next = kpt_free_list;
		kpt_free_list = kpt_pages;
		kpt_pages->kpt_va = addr2;
		kpt_pages->kpt_pa = pmap_extract(kernel_pmap, addr2);
	} while (addr != addr2);
#ifdef DEBUG
	kpt_stats.kpttotal = atop(s);
	if (pmapdebug & PDB_INIT)
		printf("pmap_init: KPT: %d pages from %x to %x\n",
		       atop(s), addr, addr + s);
#endif

	/*
	 * Slightly modified version of kmem_suballoc() to get page table
	 * map where we want it.
	 */
	addr = HP_PTBASE;
	s = min(HP_PTMAXSIZE, maxproc*HP_MAX_PTSIZE);
	addr2 = addr + s;
	rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
	if (rv != KERN_SUCCESS)
		panic("pmap_init: cannot allocate space for PT map");
	pmap_reference(vm_map_pmap(kernel_map));
	pt_map = vm_map_create(vm_map_pmap(kernel_map), addr, addr2, TRUE);
	if (pt_map == NULL)
		panic("pmap_init: cannot create pt_map");
	rv = vm_map_submap(kernel_map, addr, addr2, pt_map);
	if (rv != KERN_SUCCESS)
		panic("pmap_init: cannot map range to pt_map");
#ifdef DEBUG
	if (pmapdebug & PDB_INIT)
		printf("pmap_init: pt_map [%x - %x)\n", addr, addr2);
#endif

	/*
	 * Now it is safe to enable pv_table recording.
	 */
	vm_first_phys = phys_start;
	vm_last_phys = phys_end;
	pmap_initialized = TRUE;
}

/*
 *	Used to map a range of physical addresses into kernel
 *	virtual address space.
 *
 *	For now, VM is already on, we only need to map the
 *	specified memory.
 */
vm_offset_t
pmap_map(virt, start, end, prot)
	vm_offset_t	virt;
	vm_offset_t	start;
	vm_offset_t	end;
	int		prot;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
#endif
	while (start < end) {
		pmap_enter(kernel_pmap, virt, start, prot, FALSE);
		virt += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	return(virt);
}

/*
 *	Create and return a physical map.
 *
 *	If the size specified for the map
 *	is zero, the map is an actual physical
 *	map, and may be referenced by the
 *	hardware.
 *
 *	If the size specified is non-zero,
 *	the map will be used in software only, and
 *	is bounded by that size.
 */
pmap_t
pmap_create(size)
	vm_size_t	size;
{
	register pmap_t pmap;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_create(%x)\n", size);
#endif
	/*
	 * Software use map does not need a pmap
	 */
	if (size)
		return(NULL);

	/* XXX: is it ok to wait here? */
	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
#ifdef notifwewait
	if (pmap == NULL)
		panic("pmap_create: cannot allocate a pmap");
#endif
	bzero(pmap, sizeof(*pmap));
	pmap_pinit(pmap);
	return (pmap);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
void
pmap_pinit(pmap)
	register struct pmap *pmap;
{

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_pinit(%x)\n", pmap);
#endif
	/*
	 * No need to allocate page table space yet but we do need a
	 * valid segment table.  Initially, we point everyone at the
	 * "null" segment table.  On the first pmap_enter, a real
	 * segment table will be allocated.
	 */
	pmap->pm_stab = Segtabzero;
	pmap->pm_stchanged = TRUE;
	pmap->pm_count = 1;
	simple_lock_init(&pmap->pm_lock);
}

/*
 *	Retire the given physical map from service.
 *	Should only be called if the map contains
 *	no valid mappings.
 */
void
pmap_destroy(pmap)
	register pmap_t pmap;
{
	int count;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_destroy(%x)\n", pmap);
#endif
	if (pmap == NULL)
		return;

	simple_lock(&pmap->pm_lock);
	count = --pmap->pm_count;
	simple_unlock(&pmap->pm_lock);
	if (count == 0) {
		pmap_release(pmap);
		free((caddr_t)pmap, M_VMPMAP);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap)
	register struct pmap *pmap;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_release(%x)\n", pmap);
#endif
#ifdef notdef /* DIAGNOSTIC */
	/* count would be 0 from pmap_destroy... */
	simple_lock(&pmap->pm_lock);
	if (pmap->pm_count != 1)
		panic("pmap_release count");
#endif
	if (pmap->pm_ptab)
		kmem_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
				 HP_MAX_PTSIZE);
	if (pmap->pm_stab != Segtabzero)
		kmem_free(kernel_map, (vm_offset_t)pmap->pm_stab, HP_STSIZE);
}

/*
 *	Add a reference to the specified pmap.
 */
void
pmap_reference(pmap)
	pmap_t	pmap;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_reference(%x)\n", pmap);
#endif
	if (pmap != NULL) {
		simple_lock(&pmap->pm_lock);
		pmap->pm_count++;
		simple_unlock(&pmap->pm_lock);
	}
}

/*
 *	Remove the given range of addresses from the specified map.
 *
 *	It is assumed that the start and end are properly
 *	rounded to the page size.
 */
void
pmap_remove(pmap, sva, eva)
	register pmap_t pmap;
	vm_offset_t sva, eva;
{
	register vm_offset_t pa, va;
	register pt_entry_t *pte;
	register pv_entry_t pv, npv;
	register int ix;
	pmap_t ptpmap;
	int *ste, s, bits;
	boolean_t firstpage = TRUE;
	boolean_t flushcache = FALSE;
#ifdef DEBUG
	pt_entry_t opte;

	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
		printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
#endif

	if (pmap == NULL)
		return;

#ifdef DEBUG
	remove_stats.calls++;
#endif
	for (va = sva; va < eva; va += PAGE_SIZE) {
		/*
		 * Weed out invalid mappings.
		 * Note: we assume that the segment table is always allocated.
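		 * When the STE is invalid we can skip ahead to the next
		 * segment boundary instead of checking every page in it.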
		 */
		if (!pmap_ste_v(pmap_ste(pmap, va))) {
			/* XXX: avoid address wrap around */
			if (va >= hp300_trunc_seg((vm_offset_t)-1))
				break;
			va = hp300_round_seg(va + PAGE_SIZE) - PAGE_SIZE;
			continue;
		}
		pte = pmap_pte(pmap, va);
		pa = pmap_pte_pa(pte);
		if (pa == 0)
			continue;
		/*
		 * Invalidating a non-CI page, must flush external VAC
		 * unless it is a supervisor mapping and we have already
		 * flushed the supervisor side.
		 */
		if (pmap_aliasmask && !pmap_pte_ci(pte) &&
		    !(pmap == kernel_pmap && firstpage))
			flushcache = TRUE;
#ifdef DEBUG
		opte = *pte;
		remove_stats.removes++;
#endif
		/*
		 * Update statistics
		 */
		if (pmap_pte_w(pte))
			pmap->pm_stats.wired_count--;
		pmap->pm_stats.resident_count--;

		/*
		 * Invalidate the PTEs.
		 * XXX: should cluster them up and invalidate as many
		 * as possible at once.
		 */
#ifdef DEBUG
		if (pmapdebug & PDB_REMOVE)
			printf("remove: invalidating %x ptes at %x\n",
			       hppagesperpage, pte);
#endif
		/*
		 * Flush VAC to ensure we get the correct state of any
		 * hardware maintained bits.
		 */
		if (firstpage && pmap_aliasmask) {
			firstpage = FALSE;
			if (pmap == kernel_pmap)
				flushcache = FALSE;
			DCIS();
#ifdef DEBUG
			remove_stats.sflushes++;
#endif
		}
		bits = ix = 0;
		do {
			bits |= *(int *)pte & (PG_U|PG_M);
			*(int *)pte++ = PG_NV;
			TBIS(va + ix * HP_PAGE_SIZE);
		} while (++ix != hppagesperpage);

		/*
		 * For user mappings decrement the wiring count on
		 * the PT page.  We do this after the PTE has been
		 * invalidated because vm_map_pageable winds up in
		 * pmap_pageable which clears the modify bit for the
		 * PT page.
		 */
		if (pmap != kernel_pmap) {
			pte = pmap_pte(pmap, va);
			vm_map_pageable(pt_map, trunc_page(pte),
					round_page(pte+1), TRUE);
#ifdef DEBUG
			if (pmapdebug & PDB_WIRING)
				pmap_check_wiring("remove", trunc_page(pte));
#endif
		}
		/*
		 * Remove from the PV table (raise IPL since we
		 * may be called at interrupt time).
		 */
		if (pa < vm_first_phys || pa >= vm_last_phys)
			continue;
		pv = pa_to_pvh(pa);
		ste = (int *)0;
		s = splimp();
		/*
		 * If it is the first entry on the list, it is actually
		 * in the header and we must copy the following entry up
		 * to the header.  Otherwise we must search the list for
		 * the entry.  In either case we free the now unused entry.
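		 * (The header is the pv_table entry itself, one per managed
		 * physical page.)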
		 */
		if (pmap == pv->pv_pmap && va == pv->pv_va) {
			ste = (int *)pv->pv_ptste;
			ptpmap = pv->pv_ptpmap;
			npv = pv->pv_next;
			if (npv) {
				*pv = *npv;
				free((caddr_t)npv, M_VMPVENT);
			} else
				pv->pv_pmap = NULL;
#ifdef DEBUG
			remove_stats.pvfirst++;
#endif
		} else {
			for (npv = pv->pv_next; npv; npv = npv->pv_next) {
#ifdef DEBUG
				remove_stats.pvsearch++;
#endif
				if (pmap == npv->pv_pmap && va == npv->pv_va)
					break;
				pv = npv;
			}
#ifdef DEBUG
			if (npv == NULL)
				panic("pmap_remove: PA not in pv_tab");
#endif
			ste = (int *)npv->pv_ptste;
			ptpmap = npv->pv_ptpmap;
			pv->pv_next = npv->pv_next;
			free((caddr_t)npv, M_VMPVENT);
			pv = pa_to_pvh(pa);
		}
		/*
		 * If only one mapping left we no longer need to cache inhibit
		 */
		if (pv->pv_pmap &&
		    pv->pv_next == NULL && (pv->pv_flags & PV_CI)) {
#ifdef DEBUG
			if (pmapdebug & PDB_CACHE)
				printf("remove: clearing CI for pa %x\n", pa);
#endif
			pv->pv_flags &= ~PV_CI;
			pmap_changebit(pa, PG_CI, FALSE);
#ifdef DEBUG
			if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
			    (PDB_CACHE|PDB_PVDUMP))
				pmap_pvdump(pa);
#endif
		}

		/*
		 * If this was a PT page we must also remove the
		 * mapping from the associated segment table.
		 */
		if (ste) {
#ifdef DEBUG
			remove_stats.ptinvalid++;
			if (pmapdebug & (PDB_REMOVE|PDB_PTPAGE)) {
				printf("remove: ste was %x@%x pte was %x@%x\n",
				       *ste, ste,
				       *(int *)&opte, pmap_pte(pmap, va));
			}
#endif
			*ste = SG_NV;
			/*
			 * If it was a user PT page, we decrement the
			 * reference count on the segment table as well,
			 * freeing it if it is now empty.
			 */
			if (ptpmap != kernel_pmap) {
#ifdef DEBUG
				if (pmapdebug & (PDB_REMOVE|PDB_SEGTAB))
					printf("remove: stab %x, refcnt %d\n",
					       ptpmap->pm_stab,
					       ptpmap->pm_sref - 1);
				if ((pmapdebug & PDB_PARANOIA) &&
				    ptpmap->pm_stab != (st_entry_t *)trunc_page(ste))
					panic("remove: bogus ste");
#endif
				if (--(ptpmap->pm_sref) == 0) {
#ifdef DEBUG
					if (pmapdebug&(PDB_REMOVE|PDB_SEGTAB))
						printf("remove: free stab %x\n",
						       ptpmap->pm_stab);
#endif
					kmem_free(kernel_map,
						  (vm_offset_t)ptpmap->pm_stab,
						  HP_STSIZE);
					ptpmap->pm_stab = Segtabzero;
					ptpmap->pm_stchanged = TRUE;
					/*
					 * XXX may have changed segment table
					 * pointer for current process so
					 * update now to reload hardware.
					 */
					if (ptpmap == curproc->p_vmspace->vm_map.pmap)
						PMAP_ACTIVATE(ptpmap,
						    (struct pcb *)curproc->p_addr, 1);
				}
			}
			if (ptpmap == kernel_pmap)
				TBIAS();
			else
				TBIAU();
			pv->pv_flags &= ~PV_PTPAGE;
			ptpmap->pm_ptpages--;
		}
		/*
		 * Update saved attributes for managed page
		 */
		pmap_attributes[pa_index(pa)] |= bits;
		splx(s);
	}
#ifdef DEBUG
	if (pmapvacflush & PVF_REMOVE) {
		if (pmapvacflush & PVF_TOTAL)
			DCIA();
		else if (pmap == kernel_pmap)
			DCIS();
		else
			DCIU();
	}
#endif
	if (flushcache) {
		if (pmap == kernel_pmap) {
			DCIS();
#ifdef DEBUG
			remove_stats.sflushes++;
#endif
		} else {
			DCIU();
#ifdef DEBUG
			remove_stats.uflushes++;
#endif
		}
	}
}

/*
 *	pmap_page_protect:
 *
 *	Lower the permission for all mappings to a given page.
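 *	Read protection makes the mappings read-only (copy-on-write);
 *	anything less removes them entirely.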
 */
void
pmap_page_protect(pa, prot)
	vm_offset_t	pa;
	vm_prot_t	prot;
{
	register pv_entry_t pv;
	int s;

#ifdef DEBUG
	if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
	    prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))
		printf("pmap_page_protect(%x, %x)\n", pa, prot);
#endif
	if (pa < vm_first_phys || pa >= vm_last_phys)
		return;

	switch (prot) {
	case VM_PROT_ALL:
		break;
	/* copy_on_write */
	case VM_PROT_READ:
	case VM_PROT_READ|VM_PROT_EXECUTE:
		pmap_changebit(pa, PG_RO, TRUE);
		break;
	/* remove_all */
	default:
		pv = pa_to_pvh(pa);
		s = splimp();
		while (pv->pv_pmap != NULL) {
#ifdef DEBUG
			if (!pmap_ste_v(pmap_ste(pv->pv_pmap,pv->pv_va)) ||
			    pmap_pte_pa(pmap_pte(pv->pv_pmap,pv->pv_va)) != pa)
				panic("pmap_page_protect: bad mapping");
#endif
			pmap_remove(pv->pv_pmap, pv->pv_va,
				    pv->pv_va + PAGE_SIZE);
		}
		splx(s);
		break;
	}
}

/*
 *	Set the physical protection on the
 *	specified range of this map as requested.
 */
void
pmap_protect(pmap, sva, eva, prot)
	register pmap_t	pmap;
	vm_offset_t	sva, eva;
	vm_prot_t	prot;
{
	register pt_entry_t *pte;
	register vm_offset_t va;
	register int ix;
	int hpprot;
	boolean_t firstpage = TRUE;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
		printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
#endif
	if (pmap == NULL)
		return;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}
	if (prot & VM_PROT_WRITE)
		return;

	pte = pmap_pte(pmap, sva);
	hpprot = pte_prot(pmap, prot) == PG_RO ? 1 : 0;
	for (va = sva; va < eva; va += PAGE_SIZE) {
		/*
		 * Page table page is not allocated.
		 * Skip it, we don't want to force allocation
		 * of unnecessary PTE pages just to set the protection.
		 */
		if (!pmap_ste_v(pmap_ste(pmap, va))) {
			/* XXX: avoid address wrap around */
			if (va >= hp300_trunc_seg((vm_offset_t)-1))
				break;
			va = hp300_round_seg(va + PAGE_SIZE) - PAGE_SIZE;
			pte = pmap_pte(pmap, va);
			pte += hppagesperpage;
			continue;
		}
		/*
		 * Page not valid.  Again, skip it.
		 * Should we do this?  Or set protection anyway?
		 */
		if (!pmap_pte_v(pte)) {
			pte += hppagesperpage;
			continue;
		}
		/*
		 * Flush VAC to ensure we get correct state of HW bits
		 * so we don't clobber them.
		 */
		if (firstpage && pmap_aliasmask) {
			firstpage = FALSE;
			DCIS();
		}
		ix = 0;
		do {
			/* clear VAC here if PG_RO? */
			pmap_pte_set_prot(pte++, hpprot);
			TBIS(va + ix * HP_PAGE_SIZE);
		} while (++ix != hppagesperpage);
	}
#ifdef DEBUG
	if (hpprot && (pmapvacflush & PVF_PROTECT)) {
		if (pmapvacflush & PVF_TOTAL)
			DCIA();
		else if (pmap == kernel_pmap)
			DCIS();
		else
			DCIU();
	}
#endif
}

/*
 *	Insert the given physical page (p) at
 *	the specified virtual address (v) in the
 *	target physical map with the protection requested.
 *
 *	If specified, the page will be wired down, meaning
 *	that the related pte can not be reclaimed.
 *
 *	NB:  This is the only routine which MAY NOT lazy-evaluate
 *	or lose information.  That is, this routine must actually
 *	insert this page into the given map NOW.
 */
void
pmap_enter(pmap, va, pa, prot, wired)
	register pmap_t pmap;
	vm_offset_t va;
	register vm_offset_t pa;
	vm_prot_t prot;
	boolean_t wired;
{
	register pt_entry_t *pte;
	register int npte, ix;
	vm_offset_t opa;
	boolean_t cacheable = TRUE;
	boolean_t checkpv = TRUE;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
		printf("pmap_enter(%x, %x, %x, %x, %x)\n",
		       pmap, va, pa, prot, wired);
#endif
	if (pmap == NULL)
		return;

#ifdef DEBUG
	if (pmap == kernel_pmap)
		enter_stats.kernel++;
	else
		enter_stats.user++;
#endif
	/*
	 * For user mapping, allocate kernel VM resources if necessary.
	 */
	if (pmap->pm_ptab == NULL)
		pmap->pm_ptab = (pt_entry_t *)
			kmem_alloc_wait(pt_map, HP_MAX_PTSIZE);

	/*
	 * Segment table entry not valid, we need a new PT page
	 */
	if (!pmap_ste_v(pmap_ste(pmap, va)))
		pmap_enter_ptpage(pmap, va);

	pte = pmap_pte(pmap, va);
	opa = pmap_pte_pa(pte);
#ifdef DEBUG
	if (pmapdebug & PDB_ENTER)
		printf("enter: pte %x, *pte %x\n", pte, *(int *)pte);
#endif

	/*
	 * Mapping has not changed, must be protection or wiring change.
	 */
	if (opa == pa) {
#ifdef DEBUG
		enter_stats.pwchange++;
#endif
		/*
		 * Wiring change, just update stats.
		 * We don't worry about wiring PT pages as they remain
		 * resident as long as there are valid mappings in them.
		 * Hence, if a user page is wired, the PT page will be also.
		 */
		if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
#ifdef DEBUG
			if (pmapdebug & PDB_ENTER)
				printf("enter: wiring change -> %x\n", wired);
#endif
			if (wired)
				pmap->pm_stats.wired_count++;
			else
				pmap->pm_stats.wired_count--;
#ifdef DEBUG
			enter_stats.wchange++;
#endif
		}
		/*
		 * Retain cache inhibition status
		 */
		checkpv = FALSE;
		if (pmap_pte_ci(pte))
			cacheable = FALSE;
		goto validate;
	}

	/*
	 * Mapping has changed, invalidate old range and fall through to
	 * handle validating new mapping.
	 */
	if (opa) {
#ifdef DEBUG
		if (pmapdebug & PDB_ENTER)
			printf("enter: removing old mapping %x\n", va);
#endif
		pmap_remove(pmap, va, va + PAGE_SIZE);
#ifdef DEBUG
		enter_stats.mchange++;
#endif
	}

	/*
	 * If this is a new user mapping, increment the wiring count
	 * on this PT page.  PT pages are wired down as long as there
	 * is a valid mapping in the page.
	 */
	if (pmap != kernel_pmap)
		vm_map_pageable(pt_map, trunc_page(pte),
				round_page(pte+1), FALSE);

	/*
	 * Enter on the PV list if part of our managed memory
	 * Note that we raise IPL while manipulating pv_table
	 * since pmap_enter can be called at interrupt time.
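	 * (splimp() below holds off such interrupts until the list is
	 * consistent again.)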
	 */
	if (pa >= vm_first_phys && pa < vm_last_phys) {
		register pv_entry_t pv, npv;
		int s;

#ifdef DEBUG
		enter_stats.managed++;
#endif
		pv = pa_to_pvh(pa);
		s = splimp();
#ifdef DEBUG
		if (pmapdebug & PDB_ENTER)
			printf("enter: pv at %x: %x/%x/%x\n",
			       pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
#endif
		/*
		 * No entries yet, use header as the first entry
		 */
		if (pv->pv_pmap == NULL) {
#ifdef DEBUG
			enter_stats.firstpv++;
#endif
			pv->pv_va = va;
			pv->pv_pmap = pmap;
			pv->pv_next = NULL;
			pv->pv_ptste = NULL;
			pv->pv_ptpmap = NULL;
			pv->pv_flags = 0;
		}
		/*
		 * There is at least one other VA mapping this page.
		 * Place this entry after the header.
		 */
		else {
#ifdef DEBUG
			for (npv = pv; npv; npv = npv->pv_next)
				if (pmap == npv->pv_pmap && va == npv->pv_va)
					panic("pmap_enter: already in pv_tab");
#endif
			npv = (pv_entry_t)
				malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
			npv->pv_va = va;
			npv->pv_pmap = pmap;
			npv->pv_next = pv->pv_next;
			npv->pv_ptste = NULL;
			npv->pv_ptpmap = NULL;
			pv->pv_next = npv;
#ifdef DEBUG
			if (!npv->pv_next)
				enter_stats.secondpv++;
#endif
			/*
			 * Since there is another logical mapping for the
			 * same page we may need to cache-inhibit the
			 * descriptors on those CPUs with external VACs.
			 * We don't need to CI if:
			 *
			 * - No two mappings belong to the same user pmaps.
			 *   Since the cache is flushed on context switches
			 *   there is no problem between user processes.
			 *
			 * - Mappings within a single pmap are a certain
			 *   magic distance apart.  VAs at these appropriate
			 *   boundaries map to the same cache entries or
			 *   otherwise don't conflict.
			 *
			 * To keep it simple, we only check for these special
			 * cases if there are only two mappings, otherwise we
			 * punt and always CI.
			 *
			 * Note that there are no aliasing problems with the
			 * on-chip data-cache when the WA bit is set.
			 */
			if (pmap_aliasmask) {
				if (pv->pv_flags & PV_CI) {
#ifdef DEBUG
					if (pmapdebug & PDB_CACHE)
						printf("enter: pa %x already CI'ed\n",
						       pa);
#endif
					checkpv = cacheable = FALSE;
				} else if (npv->pv_next ||
					   ((pmap == pv->pv_pmap ||
					     pmap == kernel_pmap ||
					     pv->pv_pmap == kernel_pmap) &&
					    ((pv->pv_va & pmap_aliasmask) !=
					     (va & pmap_aliasmask)))) {
#ifdef DEBUG
					if (pmapdebug & PDB_CACHE)
						printf("enter: pa %x CI'ing all\n",
						       pa);
#endif
					cacheable = FALSE;
					pv->pv_flags |= PV_CI;
#ifdef DEBUG
					enter_stats.ci++;
#endif
				}
			}
		}
		splx(s);
	}
	/*
	 * Assumption: if it is not part of our managed memory
	 * then it must be device memory which may be volatile.
	 */
	else if (pmap_initialized) {
		checkpv = cacheable = FALSE;
#ifdef DEBUG
		enter_stats.unmanaged++;
#endif
	}

	/*
	 * Increment counters
	 */
	pmap->pm_stats.resident_count++;
	if (wired)
		pmap->pm_stats.wired_count++;

validate:
	/*
	 * Flush VAC to ensure we get correct state of HW bits
	 * so we don't clobber them.
	 */
	if (pmap_aliasmask)
		DCIS();
	/*
	 * Now validate mapping with desired protection/wiring.
	 * Assume uniform modified and referenced status for all
	 * HP pages in a MACH page.
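	 * The loop below replicates npte into each of the hppagesperpage
	 * HP-sized PTEs that make up one MACH page.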
	 */
	npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
	npte |= (*(int *)pte & (PG_M|PG_U));
	if (wired)
		npte |= PG_W;
	if (!checkpv && !cacheable)
		npte |= PG_CI;
#ifdef DEBUG
	if (pmapdebug & PDB_ENTER)
		printf("enter: new pte value %x\n", npte);
#endif
	ix = 0;
	do {
		*(int *)pte++ = npte;
		TBIS(va);
		npte += HP_PAGE_SIZE;
		va += HP_PAGE_SIZE;
	} while (++ix != hppagesperpage);
	/*
	 * The following is executed if we are entering a second
	 * (or greater) mapping for a physical page and the mappings
	 * may create an aliasing problem.  In this case we must
	 * cache inhibit the descriptors involved and flush any
	 * external VAC.
	 */
	if (checkpv && !cacheable) {
		pmap_changebit(pa, PG_CI, TRUE);
		DCIA();
#ifdef DEBUG
		enter_stats.flushes++;
#endif
#ifdef DEBUG
		if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
		    (PDB_CACHE|PDB_PVDUMP))
			pmap_pvdump(pa);
#endif
	}
#ifdef DEBUG
	else if (pmapvacflush & PVF_ENTER) {
		if (pmapvacflush & PVF_TOTAL)
			DCIA();
		else if (pmap == kernel_pmap)
			DCIS();
		else
			DCIU();
	}
	if ((pmapdebug & PDB_WIRING) && pmap != kernel_pmap) {
		va -= PAGE_SIZE;
		pmap_check_wiring("enter", trunc_page(pmap_pte(pmap, va)));
	}
#endif
}

/*
 *	Routine:	pmap_change_wiring
 *	Function:	Change the wiring attribute for a map/virtual-address
 *			pair.
 *	In/out conditions:
 *			The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap, va, wired)
	register pmap_t	pmap;
	vm_offset_t	va;
	boolean_t	wired;
{
	register pt_entry_t *pte;
	register int ix;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_change_wiring(%x, %x, %x)\n", pmap, va, wired);
#endif
	if (pmap == NULL)
		return;

	pte = pmap_pte(pmap, va);
#ifdef DEBUG
	/*
	 * Page table page is not allocated.
	 * Should this ever happen?  Ignore it for now,
	 * we don't want to force allocation of unnecessary PTE pages.
	 */
	if (!pmap_ste_v(pmap_ste(pmap, va))) {
		if (pmapdebug & PDB_PARANOIA)
			printf("pmap_change_wiring: invalid STE for %x\n", va);
		return;
	}
	/*
	 * Page not valid.  Should this ever happen?
	 * Just continue and change wiring anyway.
	 */
	if (!pmap_pte_v(pte)) {
		if (pmapdebug & PDB_PARANOIA)
			printf("pmap_change_wiring: invalid PTE for %x\n", va);
	}
#endif
	if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
		if (wired)
			pmap->pm_stats.wired_count++;
		else
			pmap->pm_stats.wired_count--;
	}
	/*
	 * Wiring is not a hardware characteristic so there is no need
	 * to invalidate TLB.
	 */
	ix = 0;
	do {
		pmap_pte_set_w(pte++, wired);
	} while (++ix != hppagesperpage);
}

/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
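 *		Returns 0 if there is no valid mapping.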
 */

vm_offset_t
pmap_extract(pmap, va)
	register pmap_t	pmap;
	vm_offset_t va;
{
	register vm_offset_t pa;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_extract(%x, %x) -> ", pmap, va);
#endif
	pa = 0;
	if (pmap && pmap_ste_v(pmap_ste(pmap, va)))
		pa = *(int *)pmap_pte(pmap, va);
	if (pa)
		pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("%x\n", pa);
#endif
	return(pa);
}

/*
 *	Copy the range specified by src_addr/len
 *	from the source map to the range dst_addr/len
 *	in the destination map.
 *
 *	This routine is only advisory and need not do anything.
 */
void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
	pmap_t		dst_pmap;
	pmap_t		src_pmap;
	vm_offset_t	dst_addr;
	vm_size_t	len;
	vm_offset_t	src_addr;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy(%x, %x, %x, %x, %x)\n",
		       dst_pmap, src_pmap, dst_addr, len, src_addr);
#endif
}

/*
 *	Require that all active physical maps contain no
 *	incorrect entries NOW.  [This update includes
 *	forcing updates of any address map caching.]
 *
 *	Generally used to ensure that a thread about
 *	to run will see a semantically correct world.
 */
void pmap_update()
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_update()\n");
#endif
	TBIA();
}

/*
 *	Routine:	pmap_collect
 *	Function:
 *		Garbage collects the physical map system for
 *		pages which are no longer used.
 *		Success need not be guaranteed -- that is, there
 *		may well be pages which are not referenced, but
 *		others may be collected.
 *	Usage:
 *		Called by the pageout daemon when pages are scarce.
 */
void
pmap_collect(pmap)
	pmap_t		pmap;
{
	register vm_offset_t pa;
	register pv_entry_t pv;
	register int *pte;
	vm_offset_t kpa;
	int s;

#ifdef DEBUG
	int *ste;
	int opmapdebug;
#endif
	if (pmap != kernel_pmap)
		return;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_collect(%x)\n", pmap);
	kpt_stats.collectscans++;
#endif
	s = splimp();
	for (pa = vm_first_phys; pa < vm_last_phys; pa += PAGE_SIZE) {
		register struct kpt_page *kpt, **pkpt;

		/*
		 * Locate physical pages which are being used as kernel
		 * page table pages.
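		 * (They are recognized by a PV_PTPAGE entry whose pv_ptpmap
		 * is the kernel pmap.)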
		 */
		pv = pa_to_pvh(pa);
		if (pv->pv_pmap != kernel_pmap || !(pv->pv_flags & PV_PTPAGE))
			continue;
		do {
			if (pv->pv_ptste && pv->pv_ptpmap == kernel_pmap)
				break;
		} while (pv = pv->pv_next);
		if (pv == NULL)
			continue;
#ifdef DEBUG
		if (pv->pv_va < (vm_offset_t)Sysmap ||
		    pv->pv_va >= (vm_offset_t)Sysmap + HP_MAX_PTSIZE)
			printf("collect: kernel PT VA out of range\n");
		else
			goto ok;
		pmap_pvdump(pa);
		continue;
ok:
#endif
		pte = (int *)(pv->pv_va + HP_PAGE_SIZE);
		while (--pte >= (int *)pv->pv_va && *pte == PG_NV)
			;
		if (pte >= (int *)pv->pv_va)
			continue;

#ifdef DEBUG
		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
			printf("collect: freeing KPT page at %x (ste %x@%x)\n",
			       pv->pv_va, *(int *)pv->pv_ptste, pv->pv_ptste);
			opmapdebug = pmapdebug;
			pmapdebug |= PDB_PTPAGE;
		}

		ste = (int *)pv->pv_ptste;
#endif
		/*
		 * If all entries were invalid we can remove the page.
		 * We call pmap_remove to take care of invalidating ST
		 * and Sysptmap entries.
		 */
		kpa = pmap_extract(pmap, pv->pv_va);
		pmap_remove(pmap, pv->pv_va, pv->pv_va + HP_PAGE_SIZE);
		/*
		 * Use the physical address to locate the original
		 * (kmem_alloc assigned) address for the page and put
		 * that page back on the free list.
		 */
		for (pkpt = &kpt_used_list, kpt = *pkpt;
		     kpt != (struct kpt_page *)0;
		     pkpt = &kpt->kpt_next, kpt = *pkpt)
			if (kpt->kpt_pa == kpa)
				break;
#ifdef DEBUG
		if (kpt == (struct kpt_page *)0)
			panic("pmap_collect: lost a KPT page");
		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
			printf("collect: %x (%x) to free list\n",
			       kpt->kpt_va, kpa);
#endif
		*pkpt = kpt->kpt_next;
		kpt->kpt_next = kpt_free_list;
		kpt_free_list = kpt;
#ifdef DEBUG
		kpt_stats.kptinuse--;
		kpt_stats.collectpages++;
		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
			pmapdebug = opmapdebug;

		if (*ste)
			printf("collect: kernel STE at %x still valid (%x)\n",
			       ste, *ste);
		ste = (int *)&Sysptmap[(st_entry_t *)ste-pmap_ste(kernel_pmap, 0)];
		if (*ste)
			printf("collect: kernel PTmap at %x still valid (%x)\n",
			       ste, *ste);
#endif
	}
	splx(s);
}

void
pmap_activate(pmap, pcbp)
	register pmap_t pmap;
	struct pcb *pcbp;
{
#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_SEGTAB))
		printf("pmap_activate(%x, %x)\n", pmap, pcbp);
#endif
	PMAP_ACTIVATE(pmap, pcbp, pmap == curproc->p_vmspace->vm_map.pmap);
}

/*
 *	pmap_zero_page zeros the specified (machine independent)
 *	page by mapping the page into virtual memory and using
 *	bzero to clear its contents, one machine dependent page
 *	at a time.
 */
pmap_zero_page(phys)
	register vm_offset_t	phys;
{
	register int ix;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_zero_page(%x)\n", phys);
#endif
	phys >>= PG_SHIFT;
	ix = 0;
	do {
		clearseg(phys++);
	} while (++ix != hppagesperpage);
}

/*
 *	pmap_copy_page copies the specified (machine independent)
 *	page by mapping the page into virtual memory and using
 *	bcopy to copy the page, one machine dependent page at a
 *	time.
 */
pmap_copy_page(src, dst)
	register vm_offset_t	src, dst;
{
	register int ix;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy_page(%x, %x)\n", src, dst);
#endif
	src >>= PG_SHIFT;
	dst >>= PG_SHIFT;
	ix = 0;
	do {
		physcopyseg(src++, dst++);
	} while (++ix != hppagesperpage);
}


/*
 *	Routine:	pmap_pageable
 *	Function:
 *		Make the specified pages (by pmap, offset)
 *		pageable (or not) as requested.
 *
 *		A page which is not pageable may not take
 *		a fault; therefore, its page table entry
 *		must remain valid for the duration.
 *
 *		This routine is merely advisory; pmap_enter
 *		will specify that these pages are to be wired
 *		down (or not) as appropriate.
 */
pmap_pageable(pmap, sva, eva, pageable)
	pmap_t		pmap;
	vm_offset_t	sva, eva;
	boolean_t	pageable;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_pageable(%x, %x, %x, %x)\n",
		       pmap, sva, eva, pageable);
#endif
	/*
	 * If we are making a PT page pageable then all valid
	 * mappings must be gone from that page.  Hence it should
	 * be all zeros and there is no need to clean it.
	 * Assumptions:
	 *	- we are called with only one page at a time
	 *	- PT pages have only one pv_table entry
	 */
	if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
		register pv_entry_t pv;
		register vm_offset_t pa;

#ifdef DEBUG
		if ((pmapdebug & (PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE)
			printf("pmap_pageable(%x, %x, %x, %x)\n",
			       pmap, sva, eva, pageable);
#endif
		if (!pmap_ste_v(pmap_ste(pmap, sva)))
			return;
		pa = pmap_pte_pa(pmap_pte(pmap, sva));
		if (pa < vm_first_phys || pa >= vm_last_phys)
			return;
		pv = pa_to_pvh(pa);
		if (pv->pv_ptste == NULL)
			return;
#ifdef DEBUG
		if (pv->pv_va != sva || pv->pv_next) {
			printf("pmap_pageable: bad PT page va %x next %x\n",
			       pv->pv_va, pv->pv_next);
			return;
		}
#endif
		/*
		 * Mark it unmodified to avoid pageout
		 */
		pmap_changebit(pa, PG_M, FALSE);
#ifdef DEBUG
		if (pmapdebug & PDB_PTPAGE)
			printf("pmap_pageable: PT page %x(%x) unmodified\n",
			       sva, *(int *)pmap_pte(pmap, sva));
		if (pmapdebug & PDB_WIRING)
			pmap_check_wiring("pageable", sva);
#endif
	}
}

/*
 *	Clear the modify bits on the specified physical page.
 */

void
pmap_clear_modify(pa)
	vm_offset_t	pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_modify(%x)\n", pa);
#endif
	pmap_changebit(pa, PG_M, FALSE);
}

/*
 *	pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */

void pmap_clear_reference(pa)
	vm_offset_t	pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_reference(%x)\n", pa);
#endif
	pmap_changebit(pa, PG_U, FALSE);
}

/*
 *	pmap_is_referenced:
 *
 *	Return whether or not the specified physical page is referenced
 *	by any physical maps.
 */

boolean_t
pmap_is_referenced(pa)
	vm_offset_t	pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW) {
		boolean_t rv = pmap_testbit(pa, PG_U);
		printf("pmap_is_referenced(%x) -> %c\n", pa, "FT"[rv]);
		return(rv);
	}
#endif
	return(pmap_testbit(pa, PG_U));
}

/*
 *	pmap_is_modified:
 *
 *	Return whether or not the specified physical page is modified
 *	by any physical maps.
 */

boolean_t
pmap_is_modified(pa)
	vm_offset_t	pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW) {
		boolean_t rv = pmap_testbit(pa, PG_M);
		printf("pmap_is_modified(%x) -> %c\n", pa, "FT"[rv]);
		return(rv);
	}
#endif
	return(pmap_testbit(pa, PG_M));
}

vm_offset_t
pmap_phys_address(ppn)
	int ppn;
{
	return(hp300_ptob(ppn));
}

/*
 * Miscellaneous support routines follow
 */

/* static */
hp300_protection_init()
{
	register int *kp, prot;

	kp = protection_codes;
	for (prot = 0; prot < 8; prot++) {
		switch (prot) {
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
			*kp++ = 0;
			break;
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
			*kp++ = PG_RO;
			break;
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
			*kp++ = PG_RW;
			break;
		}
	}
}

/* static */
boolean_t
pmap_testbit(pa, bit)
	register vm_offset_t pa;
	int bit;
{
	register pv_entry_t pv;
	register int *pte, ix;
	int s;

	if (pa < vm_first_phys || pa >= vm_last_phys)
		return(FALSE);

	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * Check saved info first
	 */
	if (pmap_attributes[pa_index(pa)] & bit) {
		splx(s);
		return(TRUE);
	}
	/*
	 * Flush VAC to get correct state of any hardware maintained bits.
	 */
	if (pmap_aliasmask && (bit & (PG_U|PG_M)))
		DCIS();
	/*
	 * Not found, check current mappings returning
	 * immediately if found.
	 */
	if (pv->pv_pmap != NULL) {
		for (; pv; pv = pv->pv_next) {
			pte = (int *) pmap_pte(pv->pv_pmap, pv->pv_va);
			ix = 0;
			do {
				if (*pte++ & bit) {
					splx(s);
					return(TRUE);
				}
			} while (++ix != hppagesperpage);
		}
	}
	splx(s);
	return(FALSE);
}

/* static */
pmap_changebit(pa, bit, setem)
	register vm_offset_t pa;
	int bit;
	boolean_t setem;
{
	register pv_entry_t pv;
	register int *pte, npte, ix;
	vm_offset_t va;
	int s;
	boolean_t firstpage = TRUE;

#ifdef DEBUG
	if (pmapdebug & PDB_BITS)
		printf("pmap_changebit(%x, %x, %s)\n",
		       pa, bit, setem ? "set" : "clear");
#endif
	if (pa < vm_first_phys || pa >= vm_last_phys)
		return;

	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * Clear saved attributes (modify, reference)
	 */
	if (!setem)
		pmap_attributes[pa_index(pa)] &= ~bit;
	/*
	 * Loop over all current mappings setting/clearing as apropos
	 * If setting RO do we need to clear the VAC?
	 */
	if (pv->pv_pmap != NULL) {
#ifdef DEBUG
		int toflush = 0;
#endif
		for (; pv; pv = pv->pv_next) {
#ifdef DEBUG
			toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1;
#endif
			va = pv->pv_va;

			/*
			 * XXX don't write protect pager mappings
			 */
			if (bit == PG_RO) {
				extern vm_offset_t pager_sva, pager_eva;

				if (va >= pager_sva && va < pager_eva)
					continue;
			}

			pte = (int *) pmap_pte(pv->pv_pmap, va);
			/*
			 * Flush VAC to ensure we get correct state of HW bits
			 * so we don't clobber them.
			 */
			if (firstpage && pmap_aliasmask) {
				firstpage = FALSE;
				DCIS();
			}
			ix = 0;
			do {
				if (setem)
					npte = *pte | bit;
				else
					npte = *pte & ~bit;
				if (*pte != npte) {
					*pte = npte;
					TBIS(va);
				}
				va += HP_PAGE_SIZE;
				pte++;
			} while (++ix != hppagesperpage);
		}
#ifdef DEBUG
		if (setem && bit == PG_RO && (pmapvacflush & PVF_PROTECT)) {
			if ((pmapvacflush & PVF_TOTAL) || toflush == 3)
				DCIA();
			else if (toflush == 2)
				DCIS();
			else
				DCIU();
		}
#endif
	}
	splx(s);
}

/* static */
void
pmap_enter_ptpage(pmap, va)
	register pmap_t pmap;
	register vm_offset_t va;
{
	register vm_offset_t ptpa;
	register pv_entry_t pv;
	st_entry_t *ste;
	int s;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE))
		printf("pmap_enter_ptpage: pmap %x, va %x\n", pmap, va);
	enter_stats.ptpneeded++;
#endif
	/*
	 * Allocate a segment table if necessary.  Note that it is allocated
	 * from kernel_map and not pt_map.  This keeps user page tables
	 * aligned on segment boundaries in the kernel address space.
	 * The segment table is wired down.  It will be freed whenever the
	 * reference count drops to zero.
	 */
	if (pmap->pm_stab == Segtabzero) {
		pmap->pm_stab = (st_entry_t *)
			kmem_alloc(kernel_map, HP_STSIZE);
		pmap->pm_stchanged = TRUE;
		/*
		 * XXX may have changed segment table pointer for current
		 * process so update now to reload hardware.
		 */
		if (pmap == curproc->p_vmspace->vm_map.pmap)
			PMAP_ACTIVATE(pmap, (struct pcb *)curproc->p_addr, 1);
#ifdef DEBUG
		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
			printf("enter: pmap %x stab %x\n",
			       pmap, pmap->pm_stab);
#endif
	}

	ste = pmap_ste(pmap, va);
	va = trunc_page((vm_offset_t)pmap_pte(pmap, va));

	/*
	 * In the kernel we allocate a page from the kernel PT page
	 * free list and map it into the kernel page table map (via
	 * pmap_enter).
	 */
	if (pmap == kernel_pmap) {
		register struct kpt_page *kpt;

		s = splimp();
		if ((kpt = kpt_free_list) == (struct kpt_page *)0) {
			/*
			 * No PT pages available.
			 * Try once to free up unused ones.
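			 * (pmap_collect returns completely empty KPT pages
			 * to the free list.)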
			 */
#ifdef DEBUG
			if (pmapdebug & PDB_COLLECT)
				printf("enter: no KPT pages, collecting...\n");
#endif
			pmap_collect(kernel_pmap);
			if ((kpt = kpt_free_list) == (struct kpt_page *)0)
				panic("pmap_enter_ptpage: can't get KPT page");
		}
#ifdef DEBUG
		if (++kpt_stats.kptinuse > kpt_stats.kptmaxuse)
			kpt_stats.kptmaxuse = kpt_stats.kptinuse;
#endif
		kpt_free_list = kpt->kpt_next;
		kpt->kpt_next = kpt_used_list;
		kpt_used_list = kpt;
		ptpa = kpt->kpt_pa;
		bzero(kpt->kpt_va, HP_PAGE_SIZE);
		pmap_enter(pmap, va, ptpa, VM_PROT_DEFAULT, TRUE);
#ifdef DEBUG
		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
			printf("enter: add &Sysptmap[%d]: %x (KPT page %x)\n",
			       ste - pmap_ste(pmap, 0),
			       *(int *)&Sysptmap[ste - pmap_ste(pmap, 0)],
			       kpt->kpt_va);
#endif
		splx(s);
	}
	/*
	 * For user processes we just simulate a fault on that location
	 * letting the VM system allocate a zero-filled page.
	 */
	else {
#ifdef DEBUG
		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
			printf("enter: about to fault UPT pg at %x\n", va);
#endif
		if (vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE)
		    != KERN_SUCCESS)
			panic("pmap_enter: vm_fault failed");
		ptpa = pmap_extract(kernel_pmap, va);
#ifdef DEBUG
		PHYS_TO_VM_PAGE(ptpa)->ptpage = TRUE;
#endif
	}

	/*
	 * Locate the PV entry in the kernel for this PT page and
	 * record the STE address.  This is so that we can invalidate
	 * the STE when we remove the mapping for the page.
	 */
	pv = pa_to_pvh(ptpa);
	s = splimp();
	if (pv) {
		pv->pv_flags |= PV_PTPAGE;
		do {
			if (pv->pv_pmap == kernel_pmap && pv->pv_va == va)
				break;
		} while (pv = pv->pv_next);
	}
#ifdef DEBUG
	if (pv == NULL)
		panic("pmap_enter_ptpage: PT page not entered");
#endif
	pv->pv_ptste = ste;
	pv->pv_ptpmap = pmap;
#ifdef DEBUG
	if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
		printf("enter: new PT page at PA %x, ste at %x\n", ptpa, ste);
#endif

	/*
	 * Map the new PT page into the segment table.
	 * Also increment the reference count on the segment table if this
	 * was a user page table page.  Note that we don't use vm_map_pageable
	 * to keep the count like we do for PT pages, this is mostly because
	 * it would be difficult to identify ST pages in pmap_pageable to
	 * release them.  We also avoid the overhead of vm_map_pageable.
	 */
	*(int *)ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
	if (pmap != kernel_pmap) {
		pmap->pm_sref++;
#ifdef DEBUG
		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
			printf("enter: stab %x refcnt %d\n",
			       pmap->pm_stab, pmap->pm_sref);
#endif
	}
	/*
	 * Flush stale TLB info.
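	 * (TBIAS flushes supervisor-side ATC entries for the kernel pmap,
	 * TBIAU flushes user-side entries otherwise.)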
	 */
	if (pmap == kernel_pmap)
		TBIAS();
	else
		TBIAU();
	pmap->pm_ptpages++;
	splx(s);
}

#ifdef DEBUG
pmap_pvdump(pa)
	vm_offset_t pa;
{
	register pv_entry_t pv;

	printf("pa %x", pa);
	for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next)
		printf(" -> pmap %x, va %x, ptste %x, ptpmap %x, flags %x",
		       pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap,
		       pv->pv_flags);
	printf("\n");
}

pmap_check_wiring(str, va)
	char *str;
	vm_offset_t va;
{
	vm_map_entry_t entry;
	register int count, *pte;

	va = trunc_page(va);
	if (!pmap_ste_v(pmap_ste(kernel_pmap, va)) ||
	    !pmap_pte_v(pmap_pte(kernel_pmap, va)))
		return;

	if (!vm_map_lookup_entry(pt_map, va, &entry)) {
		printf("wired_check: entry for %x not found\n", va);
		return;
	}
	count = 0;
	for (pte = (int *)va; pte < (int *)(va+PAGE_SIZE); pte++)
		if (*pte)
			count++;
	if (entry->wired_count != count)
		printf("*%s*: %x: w%d/a%d\n",
		       str, va, entry->wired_count, count);
}
#endif