/*
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * The CMU software License Agreement specifies the terms and conditions
 * for use and redistribution.
 *
 *	@(#)pmap.c	7.1 (Berkeley) 12/05/90
 */

/*
 * HP9000/300 series physical map management code.
 * For 68020/68030 machines with HP, 68851, or 68030 MMUs
 * (models 320,350,318,319,330,340,360,370,345,375)
 * Don't even pay lip service to multiprocessor support.
 */

/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidate or reduced protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */

#include "param.h"
#include "../vm/vm_param.h"
#include "user.h"
#include "proc.h"
#include "lock.h"
#include "malloc.h"

#include "../vm/pmap.h"
#include "../vm/vm_map.h"
#include "../vm/vm_kern.h"
#include "../vm/vm_prot.h"
#include "../vm/vm_page.h"

#include "machine/cpu.h"

/*
 * Allocate various and sundry SYSMAPs used in the days of old VM
 * and not yet converted.  XXX.
 */
#define BSDVM_COMPAT	1
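
/*
 * BSDVM_COMPAT gates the old-style kernel submaps (CMAP1, CMAP2,
 * vmmap, msgbuf) carved out in pmap_bootstrap() below, and the extra
 * slop added to Sysptsize to cover them.
 */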

#ifdef DEBUG
struct {
	int collectscans;
	int collectpages;
	int kpttotal;
	int kptinuse;
	int kptmaxuse;
} kpt_stats;
struct {
	int kernel;	/* entering kernel mapping */
	int user;	/* entering user mapping */
	int ptpneeded;	/* needed to allocate a PT page */
	int pwchange;	/* no mapping change, just wiring or protection */
	int wchange;	/* no mapping change, just wiring */
	int mchange;	/* was mapped but mapping to different page */
	int managed;	/* a managed page */
	int firstpv;	/* first mapping for this PA */
	int secondpv;	/* second mapping for this PA */
	int ci;		/* cache inhibited */
	int unmanaged;	/* not a managed page */
	int flushes;	/* cache flushes */
} enter_stats;
struct {
	int calls;
	int removes;
	int pvfirst;
	int pvsearch;
	int ptinvalid;
	int uflushes;
	int sflushes;
} remove_stats;

int debugmap = 0;
int pmapdebug = 0x2000;
#define PDB_FOLLOW	0x0001
#define PDB_INIT	0x0002
#define PDB_ENTER	0x0004
#define PDB_REMOVE	0x0008
#define PDB_CREATE	0x0010
#define PDB_PTPAGE	0x0020
#define PDB_CACHE	0x0040
#define PDB_BITS	0x0080
#define PDB_COLLECT	0x0100
#define PDB_PROTECT	0x0200
#define PDB_SEGTAB	0x0400
#define PDB_PARANOIA	0x2000
#define PDB_WIRING	0x4000
#define PDB_PVDUMP	0x8000

int pmapvacflush = 0;
#define PVF_ENTER	0x01
#define PVF_REMOVE	0x02
#define PVF_PROTECT	0x04
#define PVF_TOTAL	0x80
#endif

/*
 * Get STEs and PTEs for user/kernel address space
 */
#define pmap_ste(m, v)	(&((m)->pm_stab[(vm_offset_t)(v) >> SG_ISHIFT]))
#define pmap_pte(m, v)	(&((m)->pm_ptab[(vm_offset_t)(v) >> PG_SHIFT]))

#define pmap_pte_pa(pte)	(*(int *)(pte) & PG_FRAME)

#define pmap_ste_v(pte)		((pte)->sg_v)
#define pmap_pte_w(pte)		((pte)->pg_w)
#define pmap_pte_ci(pte)	((pte)->pg_ci)
#define pmap_pte_m(pte)		((pte)->pg_m)
#define pmap_pte_u(pte)		((pte)->pg_u)
#define pmap_pte_v(pte)		((pte)->pg_v)
#define pmap_pte_set_w(pte, v)		((pte)->pg_w = (v))
#define pmap_pte_set_prot(pte, v)	((pte)->pg_prot = (v))

/*
 * Given a map and a machine independent protection code,
 * convert to an hp300 protection code.
 */
#define pte_prot(m, p)	(protection_codes[p])
int	protection_codes[8];
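
/*
 * Address translation sketch (illustrative, not used by the code):
 * a VA is mapped in two steps.  pmap_ste() indexes the segment table
 * with the high-order bits (va >> SG_ISHIFT); if that STE is valid it
 * locates a PT page.  pmap_pte() indexes the flat pm_ptab view of the
 * page table with va >> PG_SHIFT to get the PTE itself.  pmap_extract()
 * below performs exactly this walk:
 *
 *	if (pmap_ste_v(pmap_ste(pmap, va)))
 *		pa = (*(int *)pmap_pte(pmap, va) & PG_FRAME) |
 *		    (va & ~PG_FRAME);
 */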

/*
 * Kernel page table page management.
 */
struct kpt_page {
	struct kpt_page *kpt_next;	/* link on either used or free list */
	vm_offset_t	kpt_va;		/* always valid kernel VA */
	vm_offset_t	kpt_pa;		/* PA of this page (for speed) */
};
struct kpt_page *kpt_free_list, *kpt_used_list;
struct kpt_page *kpt_pages;

/*
 * Kernel segment/page table and page table map.
 * The page table map gives us a level of indirection we need to dynamically
 * expand the page table.  It is essentially a copy of the segment table
 * with PTEs instead of STEs.  All are initialized in locore at boot time.
 * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs.
 * Segtabzero is an empty segment table which all processes share until they
 * reference something.
 */
st_entry_t	*Sysseg;
pt_entry_t	*Sysmap, *Sysptmap;
st_entry_t	*Segtabzero;
#if BSDVM_COMPAT
vm_size_t	Sysptsize = VM_KERNEL_PT_PAGES + 4 / NPTEPG;
#else
vm_size_t	Sysptsize = VM_KERNEL_PT_PAGES;
#endif

struct pmap	kernel_pmap_store;
pmap_t		kernel_pmap;
vm_map_t	pt_map;

vm_offset_t	avail_start;	/* PA of first available physical page */
vm_offset_t	avail_end;	/* PA of last available physical page */
vm_size_t	mem_size;	/* memory size in bytes */
vm_offset_t	virtual_avail;	/* VA of first avail page (after kernel bss)*/
vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
vm_offset_t	vm_first_phys;	/* PA of first managed page */
vm_offset_t	vm_last_phys;	/* PA just past last managed page */
int		hppagesperpage;	/* PAGE_SIZE / HP_PAGE_SIZE */
boolean_t	pmap_initialized = FALSE;	/* Has pmap_init completed? */
int		pmap_aliasmask;	/* separation at which VA aliasing ok */
char		*pmap_attributes;	/* reference and modify bits */

boolean_t	pmap_testbit();
void		pmap_enter_ptpage();

#if BSDVM_COMPAT
#include "msgbuf.h"

/*
 * All those kernel PT submaps that BSD is so fond of
 */
struct pte	*CMAP1, *CMAP2, *mmap;
caddr_t		CADDR1, CADDR2, vmmap;
struct pte	*msgbufmap;
struct msgbuf	*msgbufp;
#endif

/*
 *	Bootstrap the system enough to run with virtual memory.
 *	Map the kernel's code and data, and allocate the system page table.
 *
 *	On the HP this is called after mapping has already been enabled
 *	and just syncs the pmap module with what has already been done.
 *	[We can't call it easily with mapping off since the kernel is not
 *	mapped with PA == VA, hence we would have to relocate every address
 *	from the linked base (virtual) address 0 to the actual (physical)
 *	address of 0xFFxxxxxx.]
 */
void
pmap_bootstrap(firstaddr, loadaddr)
	vm_offset_t firstaddr;
	vm_offset_t loadaddr;
{
#if BSDVM_COMPAT
	vm_offset_t va;
	struct pte *pte;
#endif
	extern vm_offset_t maxmem, physmem;

	avail_start = firstaddr;
	avail_end = maxmem << PGSHIFT;

	/* XXX: allow for msgbuf */
	avail_end -= hp300_round_page(sizeof(struct msgbuf));

	mem_size = physmem << PGSHIFT;
	virtual_avail = VM_MIN_KERNEL_ADDRESS + (firstaddr - loadaddr);
	virtual_end = VM_MAX_KERNEL_ADDRESS;
	hppagesperpage = PAGE_SIZE / HP_PAGE_SIZE;

	/*
	 * Determine VA aliasing distance if any
	 */
	if (ectype == EC_VIRT)
		switch (machineid) {
		case HP_320:
			pmap_aliasmask = 0x3fff;	/* 16k */
			break;
		case HP_350:
			pmap_aliasmask = 0x7fff;	/* 32k */
			break;
		}

	/*
	 * Initialize protection array.
	 */
	hp300_protection_init();

	/*
	 * The kernel's pmap is statically allocated so we don't
	 * have to use pmap_create, which is unlikely to work
	 * correctly at this part of the boot sequence.
	 */
	kernel_pmap = &kernel_pmap_store;

	/*
	 * Kernel page/segment table allocated in locore,
	 * just initialize pointers.
	 */
	kernel_pmap->pm_stab = Sysseg;
	kernel_pmap->pm_ptab = Sysmap;

	simple_lock_init(&kernel_pmap->pm_lock);
	kernel_pmap->pm_count = 1;

#if BSDVM_COMPAT
	/*
	 * Allocate all the submaps we need
	 */
#define	SYSMAP(c, p, v, n)	\
	v = (c)va; va += ((n)*HP_PAGE_SIZE); p = pte; pte += (n);

	va = virtual_avail;
	pte = pmap_pte(kernel_pmap, va);

	SYSMAP(caddr_t		,CMAP1		,CADDR1	,1	)
	SYSMAP(caddr_t		,CMAP2		,CADDR2	,1	)
	SYSMAP(caddr_t		,mmap		,vmmap	,1	)
	SYSMAP(struct msgbuf *	,msgbufmap	,msgbufp ,1	)
	virtual_avail = va;
#endif
}
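
/*
 * For reference, SYSMAP(caddr_t, CMAP1, CADDR1, 1) above expands to:
 *
 *	CADDR1 = (caddr_t)va; va += 1*HP_PAGE_SIZE;
 *	CMAP1 = pte; pte += 1;
 *
 * i.e. each submap gets `n' pages of kernel VA and a pointer to the
 * first of the `n' PTEs that back it.
 */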

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(phys_start, phys_end)
	vm_offset_t	phys_start, phys_end;
{
	vm_offset_t	addr, addr2;
	vm_size_t	npg, s;
	int		rv;
	extern vm_offset_t	DIObase;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
#endif
	/*
	 * Now that kernel map has been allocated, we can mark as
	 * unavailable regions which we have mapped in locore.
	 */
	addr = DIObase;
	(void) vm_map_find(kernel_map, VM_OBJECT_NULL, (vm_offset_t) 0,
			   &addr, hp300_ptob(IOMAPSIZE), FALSE);
	if (addr != DIObase)
		goto bogons;
	addr = (vm_offset_t) Sysmap;
	vm_object_reference(kernel_object);
	(void) vm_map_find(kernel_map, kernel_object, addr,
			   &addr, HP_MAX_PTSIZE, FALSE);
	/*
	 * If this fails it is probably because the static portion of
	 * the kernel page table isn't big enough and we overran the
	 * page table map.  Need to adjust pmap_size() in hp300_init.c.
	 */
	if (addr != (vm_offset_t)Sysmap)
		goto bogons;

	addr = (vm_offset_t) &u;
	vm_object_reference(kernel_object);
	(void) vm_map_find(kernel_map, kernel_object, addr,
			   &addr, hp300_ptob(UPAGES), FALSE);
	if (addr != (vm_offset_t)&u)
bogons:
		panic("pmap_init: bogons in the VM system!\n");

#ifdef DEBUG
	if (pmapdebug & PDB_INIT) {
		printf("pmap_init: Sysseg %x, Sysmap %x, Sysptmap %x\n",
		       Sysseg, Sysmap, Sysptmap);
		printf("  pstart %x, pend %x, vstart %x, vend %x\n",
		       avail_start, avail_end, virtual_avail, virtual_end);
	}
#endif

	/*
	 * Allocate memory for random pmap data structures.  Includes the
	 * initial segment table, pv_head_table and pmap_attributes.
	 */
	npg = atop(phys_end - phys_start);
	s = (vm_size_t) (HP_STSIZE + sizeof(struct pv_entry) * npg + npg);
	s = round_page(s);
	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
	Segtabzero = (st_entry_t *) addr;
	addr += HP_STSIZE;
	pv_table = (pv_entry_t) addr;
	addr += sizeof(struct pv_entry) * npg;
	pmap_attributes = (char *) addr;
#ifdef DEBUG
	if (pmapdebug & PDB_INIT)
		printf("pmap_init: %x bytes (%x pgs): seg %x tbl %x attr %x\n",
		       s, npg, Segtabzero, pv_table, pmap_attributes);
#endif

	/*
	 * Allocate physical memory for kernel PT pages and their management.
	 * We need 1 PT page per possible task plus some slop.
	 */
	npg = min(atop(HP_MAX_KPTSIZE), nproc+16);
	s = ptoa(npg) + round_page(npg * sizeof(struct kpt_page));

	/*
	 * Verify that space will be allocated in region for which
	 * we already have kernel PT pages.
	 */
	addr = 0;
	rv = vm_map_find(kernel_map, VM_OBJECT_NULL, 0, &addr, s, TRUE);
	if (rv != KERN_SUCCESS || addr + s >= (vm_offset_t)Sysmap)
		panic("pmap_init: kernel PT too small");
	vm_map_remove(kernel_map, addr, addr + s);

	/*
	 * Now allocate the space and link the pages together to
	 * form the KPT free list.
	 */
	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
	s = ptoa(npg);
	addr2 = addr + s;
	kpt_pages = &((struct kpt_page *)addr2)[npg];
	kpt_free_list = (struct kpt_page *) 0;
	do {
		addr2 -= HP_PAGE_SIZE;
		(--kpt_pages)->kpt_next = kpt_free_list;
		kpt_free_list = kpt_pages;
		kpt_pages->kpt_va = addr2;
		kpt_pages->kpt_pa = pmap_extract(kernel_pmap, addr2);
	} while (addr != addr2);
#ifdef DEBUG
	kpt_stats.kpttotal = atop(s);
	if (pmapdebug & PDB_INIT)
		printf("pmap_init: KPT: %d pages from %x to %x\n",
		       atop(s), addr, addr + s);
#endif

	/*
	 * Slightly modified version of kmem_suballoc() to get page table
	 * map where we want it.
	 */
	addr = HP_PTBASE;
	s = min(HP_PTMAXSIZE, nproc*HP_MAX_PTSIZE);
	addr2 = addr + s;
	rv = vm_map_find(kernel_map, VM_OBJECT_NULL, 0, &addr, s, TRUE);
	if (rv != KERN_SUCCESS)
		panic("pmap_init: cannot allocate space for PT map");
	pmap_reference(vm_map_pmap(kernel_map));
	pt_map = vm_map_create(vm_map_pmap(kernel_map), addr, addr2, TRUE);
	if (pt_map == VM_MAP_NULL)
		panic("pmap_init: cannot create pt_map");
	rv = vm_map_submap(kernel_map, addr, addr2, pt_map);
	if (rv != KERN_SUCCESS)
		panic("pmap_init: cannot map range to pt_map");
#ifdef DEBUG
	if (pmapdebug & PDB_INIT)
		printf("pmap_init: pt_map [%x - %x)\n", addr, addr2);
#endif

	/*
	 * Now it is safe to enable pv_table recording.
	 */
	vm_first_phys = phys_start;
	vm_last_phys = phys_end;
	pmap_initialized = TRUE;
}
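
/*
 * Layout note for the KPT allocation above: the kmem_alloc'ed chunk
 * holds the npg page-table pages first, followed by their npg
 * struct kpt_page descriptors.  The loop walks both arrays backwards
 * in lockstep, pushing each page onto kpt_free_list with its VA and
 * (precomputed) PA recorded in the descriptor.
 */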

/*
 *	Used to map a range of physical addresses into kernel
 *	virtual address space.
 *
 *	For now, VM is already on, we only need to map the
 *	specified memory.
 */
vm_offset_t
pmap_map(virt, start, end, prot)
	vm_offset_t	virt;
	vm_offset_t	start;
	vm_offset_t	end;
	int		prot;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
#endif
	while (start < end) {
		pmap_enter(kernel_pmap, virt, start, prot, FALSE);
		virt += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	return(virt);
}

/*
 *	Create and return a physical map.
 *
 *	If the size specified for the map
 *	is zero, the map is an actual physical
 *	map, and may be referenced by the
 *	hardware.
 *
 *	If the size specified is non-zero,
 *	the map will be used in software only, and
 *	is bounded by that size.
 */
pmap_t
pmap_create(size)
	vm_size_t	size;
{
	register pmap_t pmap;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_create(%x)\n", size);
#endif
	/*
	 * Software use map does not need a pmap
	 */
	if (size)
		return(PMAP_NULL);

	/* XXX: is it ok to wait here? */
	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
	if (pmap == PMAP_NULL)
		panic("pmap_create: cannot allocate a pmap");

	/*
	 * No need to allocate page table space yet but we do need a
	 * valid segment table.  Initially, we point everyone at the
	 * "null" segment table.  On the first pmap_enter, a real
	 * segment table will be allocated.
	 */
	pmap->pm_ptab = PT_ENTRY_NULL;
	pmap->pm_stab = Segtabzero;
	pmap->pm_stchanged = TRUE;
	pmap->pm_sref = 0;
	pmap->pm_count = 1;
	simple_lock_init(&pmap->pm_lock);
	pmap->pm_stats.resident_count = 0;
	pmap->pm_stats.wired_count = 0;
	pmap->pm_ptpages = 0;
	return(pmap);
}
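
/*
 * Note that a new pmap is deliberately lightweight: both the page
 * table (pm_ptab) and a private segment table are allocated lazily.
 * pmap_enter() allocates pm_ptab from pt_map on first use, and
 * pmap_enter_ptpage() replaces Segtabzero with a real segment table
 * the first time a PT page is needed.
 */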

/*
 *	Retire the given physical map from service.
 *	Should only be called if the map contains
 *	no valid mappings.
 */
void
pmap_destroy(pmap)
	register pmap_t pmap;
{
	int count;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_destroy(%x)\n", pmap);
#endif
	if (pmap == PMAP_NULL)
		return;

	simple_lock(&pmap->pm_lock);
	count = --pmap->pm_count;
	simple_unlock(&pmap->pm_lock);
	if (count)
		return;

	if (pmap->pm_ptab)
		kmem_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
				 HP_MAX_PTSIZE);
	if (pmap->pm_stab != Segtabzero)
		kmem_free(kernel_map, (vm_offset_t)pmap->pm_stab, HP_STSIZE);
	free((caddr_t)pmap, M_VMPMAP);
}

/*
 *	Add a reference to the specified pmap.
 */
void
pmap_reference(pmap)
	pmap_t	pmap;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_reference(%x)\n", pmap);
#endif
	if (pmap != PMAP_NULL) {
		simple_lock(&pmap->pm_lock);
		pmap->pm_count++;
		simple_unlock(&pmap->pm_lock);
	}
}

/*
 *	Remove the given range of addresses from the specified map.
 *
 *	It is assumed that the start and end are properly
 *	rounded to the page size.
 */
void
pmap_remove(pmap, sva, eva)
	register pmap_t pmap;
	vm_offset_t sva, eva;
{
	register vm_offset_t pa, va;
	register pt_entry_t *pte;
	register pv_entry_t pv, npv;
	register int ix;
	pmap_t ptpmap;
	int *ste, s, bits;
	boolean_t firstpage = TRUE;
	boolean_t flushcache = FALSE;
#ifdef DEBUG
	pt_entry_t opte;

	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
		printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
#endif

	if (pmap == PMAP_NULL)
		return;

#ifdef DEBUG
	remove_stats.calls++;
#endif
	for (va = sva; va < eva; va += PAGE_SIZE) {
		/*
		 * Weed out invalid mappings.
		 * Note: we assume that the segment table is always allocated.
		 */
		if (!pmap_ste_v(pmap_ste(pmap, va))) {
			/* XXX: avoid address wrap around */
			if (va >= hp300_trunc_seg((vm_offset_t)-1))
				break;
			va = hp300_round_seg(va + PAGE_SIZE) - PAGE_SIZE;
			continue;
		}
		pte = pmap_pte(pmap, va);
		pa = pmap_pte_pa(pte);
		if (pa == 0)
			continue;
		/*
		 * Invalidating a non-CI page, must flush external VAC
		 * unless it is a supervisor mapping and we have already
		 * flushed the supervisor side.
		 */
		if (pmap_aliasmask && !pmap_pte_ci(pte) &&
		    !(pmap == kernel_pmap && firstpage))
			flushcache = TRUE;
#ifdef DEBUG
		opte = *pte;
		remove_stats.removes++;
#endif
		/*
		 * Update statistics
		 */
		if (pmap_pte_w(pte))
			pmap->pm_stats.wired_count--;
		pmap->pm_stats.resident_count--;

		/*
		 * Invalidate the PTEs.
		 * XXX: should cluster them up and invalidate as many
		 * as possible at once.
		 */
#ifdef DEBUG
		if (pmapdebug & PDB_REMOVE)
			printf("remove: invalidating %x ptes at %x\n",
			       hppagesperpage, pte);
#endif
		/*
		 * Flush VAC to ensure we get the correct state of any
		 * hardware maintained bits.
		 */
		if (firstpage && pmap_aliasmask) {
			firstpage = FALSE;
			if (pmap == kernel_pmap)
				flushcache = FALSE;
			DCIS();
#ifdef DEBUG
			remove_stats.sflushes++;
#endif
		}
		bits = ix = 0;
		do {
			bits |= *(int *)pte & (PG_U|PG_M);
			*(int *)pte++ = PG_NV;
			TBIS(va + ix * HP_PAGE_SIZE);
		} while (++ix != hppagesperpage);

		/*
		 * For user mappings decrement the wiring count on
		 * the PT page.  We do this after the PTE has been
		 * invalidated because vm_map_pageable winds up in
		 * pmap_pageable which clears the modify bit for the
		 * PT page.
		 */
		if (pmap != kernel_pmap) {
			pte = pmap_pte(pmap, va);
			vm_map_pageable(pt_map, trunc_page(pte),
					round_page(pte+1), TRUE);
#ifdef DEBUG
			if (pmapdebug & PDB_WIRING)
				pmap_check_wiring("remove", trunc_page(pte));
#endif
		}
		/*
		 * Remove from the PV table (raise IPL since we
		 * may be called at interrupt time).
		 */
		if (pa < vm_first_phys || pa >= vm_last_phys)
			continue;
		pv = pa_to_pvh(pa);
		ste = (int *)0;
		s = splimp();
		/*
		 * If it is the first entry on the list, it is actually
		 * in the header and we must copy the following entry up
		 * to the header.  Otherwise we must search the list for
		 * the entry.  In either case we free the now unused entry.
		 */
		if (pmap == pv->pv_pmap && va == pv->pv_va) {
			ste = (int *)pv->pv_ptste;
			ptpmap = pv->pv_ptpmap;
			npv = pv->pv_next;
			if (npv) {
				*pv = *npv;
				free((caddr_t)npv, M_VMPVENT);
			} else
				pv->pv_pmap = PMAP_NULL;
#ifdef DEBUG
			remove_stats.pvfirst++;
#endif
		} else {
			for (npv = pv->pv_next; npv; npv = npv->pv_next) {
#ifdef DEBUG
				remove_stats.pvsearch++;
#endif
				if (pmap == npv->pv_pmap && va == npv->pv_va)
					break;
				pv = npv;
			}
#ifdef DEBUG
			if (npv == PV_ENTRY_NULL)
				panic("pmap_remove: PA not in pv_tab");
#endif
			ste = (int *)npv->pv_ptste;
			ptpmap = npv->pv_ptpmap;
			pv->pv_next = npv->pv_next;
			free((caddr_t)npv, M_VMPVENT);
			pv = pa_to_pvh(pa);
		}
		/*
		 * If only one mapping left we no longer need to cache inhibit
		 */
		if (pv->pv_pmap &&
		    pv->pv_next == PV_ENTRY_NULL && (pv->pv_flags & PV_CI)) {
#ifdef DEBUG
			if (pmapdebug & PDB_CACHE)
				printf("remove: clearing CI for pa %x\n", pa);
#endif
			pv->pv_flags &= ~PV_CI;
			pmap_changebit(pa, PG_CI, FALSE);
#ifdef DEBUG
			if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
			    (PDB_CACHE|PDB_PVDUMP))
				pmap_pvdump(pa);
#endif
		}

		/*
		 * If this was a PT page we must also remove the
		 * mapping from the associated segment table.
		 */
		if (ste) {
#ifdef DEBUG
			remove_stats.ptinvalid++;
			if (pmapdebug & (PDB_REMOVE|PDB_PTPAGE)) {
				printf("remove: ste was %x@%x pte was %x@%x\n",
				       *ste, ste,
				       *(int *)&opte, pmap_pte(pmap, va));
			}
#endif
			*ste = SG_NV;
			/*
			 * If it was a user PT page, we decrement the
			 * reference count on the segment table as well,
			 * freeing it if it is now empty.
			 */
			if (ptpmap != kernel_pmap) {
#ifdef DEBUG
				if (pmapdebug & (PDB_REMOVE|PDB_SEGTAB))
					printf("remove: stab %x, refcnt %d\n",
					       ptpmap->pm_stab,
					       ptpmap->pm_sref - 1);
				if ((pmapdebug & PDB_PARANOIA) &&
				    ptpmap->pm_stab != trunc_page(ste))
					panic("remove: bogus ste");
#endif
				if (--(ptpmap->pm_sref) == 0) {
#ifdef DEBUG
					if (pmapdebug&(PDB_REMOVE|PDB_SEGTAB))
						printf("remove: free stab %x\n",
						       ptpmap->pm_stab);
#endif
					kmem_free(kernel_map,
						  (vm_offset_t)ptpmap->pm_stab,
						  HP_STSIZE);
					ptpmap->pm_stab = Segtabzero;
					ptpmap->pm_stchanged = TRUE;
					/*
					 * XXX may have changed segment table
					 * pointer for current process so
					 * update now to reload hardware.
					 */
					if (ptpmap == u.u_procp->p_map->pmap)
						PMAP_ACTIVATE(ptpmap,
						    (struct pcb *)u.u_procp->p_addr);
				}
			}
			if (ptpmap == kernel_pmap)
				TBIAS();
			else
				TBIAU();
			pv->pv_flags &= ~PV_PTPAGE;
			ptpmap->pm_ptpages--;
		}
		/*
		 * Update saved attributes for managed page
		 */
		pmap_attributes[pa_index(pa)] |= bits;
		splx(s);
	}
#ifdef DEBUG
	if (pmapvacflush & PVF_REMOVE) {
		if (pmapvacflush & PVF_TOTAL)
			DCIA();
		else if (pmap == kernel_pmap)
			DCIS();
		else
			DCIU();
	}
#endif
	if (flushcache) {
		if (pmap == kernel_pmap) {
			DCIS();
#ifdef DEBUG
			remove_stats.sflushes++;
#endif
		} else {
			DCIU();
#ifdef DEBUG
			remove_stats.uflushes++;
#endif
		}
	}
}

/*
 *	Routine:	pmap_remove_all
 *	Function:
 *		Removes this physical page from
 *		all physical maps in which it resides.
 *		Reflects back modify bits to the pager.
 */
void
pmap_remove_all(pa)
	vm_offset_t pa;
{
	register pv_entry_t pv;
	int s;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
		printf("pmap_remove_all(%x)\n", pa);
#endif
	/*
	 * Not one of ours
	 */
	if (pa < vm_first_phys || pa >= vm_last_phys)
		return;

	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * Do it the easy way for now
	 */
	while (pv->pv_pmap != PMAP_NULL) {
#ifdef DEBUG
		if (!pmap_ste_v(pmap_ste(pv->pv_pmap, pv->pv_va)) ||
		    pmap_pte_pa(pmap_pte(pv->pv_pmap, pv->pv_va)) != pa)
			panic("pmap_remove_all: bad mapping");
#endif
		pmap_remove(pv->pv_pmap, pv->pv_va, pv->pv_va + PAGE_SIZE);
	}
	splx(s);
}
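
/*
 * The loop above terminates because each pmap_remove() call unlinks
 * the corresponding entry from this page's PV list (copying the next
 * entry into the header when the first one goes), so pv->pv_pmap
 * eventually becomes PMAP_NULL.
 */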

/*
 *	Routine:	pmap_copy_on_write
 *	Function:
 *		Remove write privileges from all
 *		physical maps for this physical page.
 */
void
pmap_copy_on_write(pa)
	vm_offset_t pa;
{
#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
		printf("pmap_copy_on_write(%x)\n", pa);
#endif
	pmap_changebit(pa, PG_RO, TRUE);
}

/*
 *	Set the physical protection on the
 *	specified range of this map as requested.
 */
void
pmap_protect(pmap, sva, eva, prot)
	register pmap_t	pmap;
	vm_offset_t	sva, eva;
	vm_prot_t	prot;
{
	register pt_entry_t *pte;
	register vm_offset_t va;
	register int ix;
	int hpprot;
	boolean_t firstpage = TRUE;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
		printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
#endif
	if (pmap == PMAP_NULL)
		return;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}
	if (prot & VM_PROT_WRITE)
		return;

	pte = pmap_pte(pmap, sva);
	hpprot = pte_prot(pmap, prot) == PG_RO ? 1 : 0;
	for (va = sva; va < eva; va += PAGE_SIZE) {
		/*
		 * Page table page is not allocated.
		 * Skip it, we don't want to force allocation
		 * of unnecessary PTE pages just to set the protection.
		 */
		if (!pmap_ste_v(pmap_ste(pmap, va))) {
			/* XXX: avoid address wrap around */
			if (va >= hp300_trunc_seg((vm_offset_t)-1))
				break;
			va = hp300_round_seg(va + PAGE_SIZE) - PAGE_SIZE;
			pte = pmap_pte(pmap, va);
			pte += hppagesperpage;
			continue;
		}
		/*
		 * Page not valid.  Again, skip it.
		 * Should we do this?  Or set protection anyway?
		 */
		if (!pmap_pte_v(pte)) {
			pte += hppagesperpage;
			continue;
		}
		/*
		 * Flush VAC to ensure we get correct state of HW bits
		 * so we don't clobber them.
		 */
		if (firstpage && pmap_aliasmask) {
			firstpage = FALSE;
			DCIS();
		}
		ix = 0;
		do {
			/* clear VAC here if PG_RO? */
			pmap_pte_set_prot(pte++, hpprot);
			TBIS(va + ix * HP_PAGE_SIZE);
		} while (++ix != hppagesperpage);
	}
#ifdef DEBUG
	if (hpprot && (pmapvacflush & PVF_PROTECT)) {
		if (pmapvacflush & PVF_TOTAL)
			DCIA();
		else if (pmap == kernel_pmap)
			DCIS();
		else
			DCIU();
	}
#endif
}
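
/*
 * Summary of the protection cases above: no read permission means the
 * mapping is removed outright; write permission means there is nothing
 * to take away (hp300_protection_init() below maps every writable
 * combination to PG_RW); everything else degrades the PTEs to
 * read-only (PG_RO).
 */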

/*
 *	Insert the given physical page (p) at
 *	the specified virtual address (v) in the
 *	target physical map with the protection requested.
 *
 *	If specified, the page will be wired down, meaning
 *	that the related pte cannot be reclaimed.
 *
 *	NB:  This is the only routine which MAY NOT lazy-evaluate
 *	or lose information.  That is, this routine must actually
 *	insert this page into the given map NOW.
 */
void
pmap_enter(pmap, va, pa, prot, wired)
	register pmap_t pmap;
	vm_offset_t va;
	register vm_offset_t pa;
	vm_prot_t prot;
	boolean_t wired;
{
	register pt_entry_t *pte;
	register int npte, ix;
	vm_offset_t opa;
	boolean_t cacheable = TRUE;
	boolean_t checkpv = TRUE;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
		printf("pmap_enter(%x, %x, %x, %x, %x)\n",
		       pmap, va, pa, prot, wired);
#endif
	if (pmap == PMAP_NULL)
		return;

#ifdef DEBUG
	if (pmap == kernel_pmap)
		enter_stats.kernel++;
	else
		enter_stats.user++;
#endif
	/*
	 * For user mapping, allocate kernel VM resources if necessary.
	 */
	if (pmap->pm_ptab == PT_ENTRY_NULL)
		pmap->pm_ptab = (pt_entry_t *)
			kmem_alloc_wait(pt_map, HP_MAX_PTSIZE);

	/*
	 * Segment table entry not valid, we need a new PT page
	 */
	if (!pmap_ste_v(pmap_ste(pmap, va)))
		pmap_enter_ptpage(pmap, va);

	pte = pmap_pte(pmap, va);
	opa = pmap_pte_pa(pte);
#ifdef DEBUG
	if (pmapdebug & PDB_ENTER)
		printf("enter: pte %x, *pte %x\n", pte, *(int *)pte);
#endif

	/*
	 * Mapping has not changed, must be protection or wiring change.
	 */
	if (opa == pa) {
#ifdef DEBUG
		enter_stats.pwchange++;
#endif
		/*
		 * Wiring change, just update stats.
		 * We don't worry about wiring PT pages as they remain
		 * resident as long as there are valid mappings in them.
		 * Hence, if a user page is wired, the PT page will be also.
		 */
		if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
#ifdef DEBUG
			if (pmapdebug & PDB_ENTER)
				printf("enter: wiring change -> %x\n", wired);
#endif
			if (wired)
				pmap->pm_stats.wired_count++;
			else
				pmap->pm_stats.wired_count--;
#ifdef DEBUG
			enter_stats.wchange++;
#endif
		}
		/*
		 * Retain cache inhibition status
		 */
		checkpv = FALSE;
		if (pmap_pte_ci(pte))
			cacheable = FALSE;
		goto validate;
	}

	/*
	 * Mapping has changed, invalidate old range and fall through to
	 * handle validating new mapping.
	 */
	if (opa) {
#ifdef DEBUG
		if (pmapdebug & PDB_ENTER)
			printf("enter: removing old mapping %x\n", va);
#endif
		pmap_remove(pmap, va, va + PAGE_SIZE);
#ifdef DEBUG
		enter_stats.mchange++;
#endif
	}

	/*
	 * If this is a new user mapping, increment the wiring count
	 * on this PT page.  PT pages are wired down as long as there
	 * is a valid mapping in the page.
	 */
	if (pmap != kernel_pmap)
		vm_map_pageable(pt_map, trunc_page(pte),
				round_page(pte+1), FALSE);

	/*
	 * Enter on the PV list if part of our managed memory
	 * Note that we raise IPL while manipulating pv_table
	 * since pmap_enter can be called at interrupt time.
	 */
	if (pa >= vm_first_phys && pa < vm_last_phys) {
		register pv_entry_t pv, npv;
		int s;

#ifdef DEBUG
		enter_stats.managed++;
#endif
		pv = pa_to_pvh(pa);
		s = splimp();
#ifdef DEBUG
		if (pmapdebug & PDB_ENTER)
			printf("enter: pv at %x: %x/%x/%x\n",
			       pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
#endif
		/*
		 * No entries yet, use header as the first entry
		 */
		if (pv->pv_pmap == PMAP_NULL) {
#ifdef DEBUG
			enter_stats.firstpv++;
#endif
			pv->pv_va = va;
			pv->pv_pmap = pmap;
			pv->pv_next = PV_ENTRY_NULL;
			pv->pv_ptste = ST_ENTRY_NULL;
			pv->pv_ptpmap = PMAP_NULL;
			pv->pv_flags = 0;
		}
		/*
		 * There is at least one other VA mapping this page.
		 * Place this entry after the header.
		 */
		else {
#ifdef DEBUG
			for (npv = pv; npv; npv = npv->pv_next)
				if (pmap == npv->pv_pmap && va == npv->pv_va)
					panic("pmap_enter: already in pv_tab");
#endif
			npv = (pv_entry_t)
				malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
			npv->pv_va = va;
			npv->pv_pmap = pmap;
			npv->pv_next = pv->pv_next;
			npv->pv_ptste = ST_ENTRY_NULL;
			npv->pv_ptpmap = PMAP_NULL;
			pv->pv_next = npv;
#ifdef DEBUG
			if (!npv->pv_next)
				enter_stats.secondpv++;
#endif
			/*
			 * Since there is another logical mapping for the
			 * same page we may need to cache-inhibit the
			 * descriptors on those CPUs with external VACs.
			 * We don't need to CI if:
			 *
			 * - No two mappings belong to the same user pmaps.
			 *   Since the cache is flushed on context switches
			 *   there is no problem between user processes.
			 *
			 * - Mappings within a single pmap are a certain
			 *   magic distance apart.  VAs at these appropriate
			 *   boundaries map to the same cache entries or
			 *   otherwise don't conflict.
			 *
			 * To keep it simple, we only check for these special
			 * cases if there are only two mappings, otherwise we
			 * punt and always CI.
			 *
			 * Note that there are no aliasing problems with the
			 * on-chip data-cache when the WA bit is set.
			 */
			if (pmap_aliasmask) {
				if (pv->pv_flags & PV_CI) {
#ifdef DEBUG
					if (pmapdebug & PDB_CACHE)
					printf("enter: pa %x already CI'ed\n",
					       pa);
#endif
					checkpv = cacheable = FALSE;
				} else if (npv->pv_next ||
					   ((pmap == pv->pv_pmap ||
					     pmap == kernel_pmap ||
					     pv->pv_pmap == kernel_pmap) &&
					    ((pv->pv_va & pmap_aliasmask) !=
					     (va & pmap_aliasmask)))) {
#ifdef DEBUG
					if (pmapdebug & PDB_CACHE)
					printf("enter: pa %x CI'ing all\n",
					       pa);
#endif
					cacheable = FALSE;
					pv->pv_flags |= PV_CI;
#ifdef DEBUG
					enter_stats.ci++;
#endif
				}
			}
		}
		splx(s);
	}
	/*
	 * Assumption: if it is not part of our managed memory
	 * then it must be device memory which may be volatile.
	 */
	else if (pmap_initialized) {
		checkpv = cacheable = FALSE;
#ifdef DEBUG
		enter_stats.unmanaged++;
#endif
	}

	/*
	 * Increment counters
	 */
	pmap->pm_stats.resident_count++;
	if (wired)
		pmap->pm_stats.wired_count++;

validate:
	/*
	 * Flush VAC to ensure we get correct state of HW bits
	 * so we don't clobber them.
	 */
	if (pmap_aliasmask)
		DCIS();
	/*
	 * Now validate mapping with desired protection/wiring.
	 * Assume uniform modified and referenced status for all
	 * HP pages in a MACH page.
	 */
	npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
	npte |= (*(int *)pte & (PG_M|PG_U));
	if (wired)
		npte |= PG_W;
	if (!checkpv && !cacheable)
		npte |= PG_CI;
#ifdef DEBUG
	if (pmapdebug & PDB_ENTER)
		printf("enter: new pte value %x\n", npte);
#endif
	ix = 0;
	do {
		*(int *)pte++ = npte;
		TBIS(va);
		npte += HP_PAGE_SIZE;
		va += HP_PAGE_SIZE;
	} while (++ix != hppagesperpage);
	/*
	 * The following is executed if we are entering a second
	 * (or greater) mapping for a physical page and the mappings
	 * may create an aliasing problem.  In this case we must
	 * cache inhibit the descriptors involved and flush any
	 * external VAC.
	 */
	if (checkpv && !cacheable) {
		pmap_changebit(pa, PG_CI, TRUE);
		DCIA();
#ifdef DEBUG
		enter_stats.flushes++;
#endif
#ifdef DEBUG
		if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
		    (PDB_CACHE|PDB_PVDUMP))
			pmap_pvdump(pa);
#endif
	}
#ifdef DEBUG
	else if (pmapvacflush & PVF_ENTER) {
		if (pmapvacflush & PVF_TOTAL)
			DCIA();
		else if (pmap == kernel_pmap)
			DCIS();
		else
			DCIU();
	}
	if ((pmapdebug & PDB_WIRING) && pmap != kernel_pmap) {
		va -= PAGE_SIZE;
		pmap_check_wiring("enter", trunc_page(pmap_pte(pmap, va)));
	}
#endif
}

/*
 *	Routine:	pmap_change_wiring
 *	Function:	Change the wiring attribute for a map/virtual-address
 *			pair.
 *	In/out conditions:
 *			The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap, va, wired)
	register pmap_t	pmap;
	vm_offset_t	va;
	boolean_t	wired;
{
	register pt_entry_t *pte;
	register int ix;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_change_wiring(%x, %x, %x)\n", pmap, va, wired);
#endif
	if (pmap == PMAP_NULL)
		return;

	pte = pmap_pte(pmap, va);
#ifdef DEBUG
	/*
	 * Page table page is not allocated.
	 * Should this ever happen?  Ignore it for now,
	 * we don't want to force allocation of unnecessary PTE pages.
	 */
	if (!pmap_ste_v(pmap_ste(pmap, va))) {
		if (pmapdebug & PDB_PARANOIA)
			printf("pmap_change_wiring: invalid STE for %x\n", va);
		return;
	}
	/*
	 * Page not valid.  Should this ever happen?
	 * Just continue and change wiring anyway.
	 */
	if (!pmap_pte_v(pte)) {
		if (pmapdebug & PDB_PARANOIA)
			printf("pmap_change_wiring: invalid PTE for %x\n", va);
	}
#endif
	if (wired && !pmap_pte_w(pte) || !wired && pmap_pte_w(pte)) {
		if (wired)
			pmap->pm_stats.wired_count++;
		else
			pmap->pm_stats.wired_count--;
	}
	/*
	 * Wiring is not a hardware characteristic so there is no need
	 * to invalidate TLB.
	 */
	ix = 0;
	do {
		pmap_pte_set_w(pte++, wired);
	} while (++ix != hppagesperpage);
}

/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */

vm_offset_t
pmap_extract(pmap, va)
	register pmap_t	pmap;
	vm_offset_t va;
{
	register vm_offset_t pa;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_extract(%x, %x) -> ", pmap, va);
#endif
	pa = 0;
	if (pmap && pmap_ste_v(pmap_ste(pmap, va)))
		pa = *(int *)pmap_pte(pmap, va);
	if (pa)
		pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("%x\n", pa);
#endif
	return(pa);
}
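
/*
 * pmap_extract() is the manual translation walk sketched near the top
 * of the file; pmap_init() uses it to record the PA of each KPT page
 * and pmap_enter_ptpage() uses it to find the PA of a freshly faulted
 * user PT page.
 */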

/*
 *	Copy the range specified by src_addr/len
 *	from the source map to the range dst_addr/len
 *	in the destination map.
 *
 *	This routine is only advisory and need not do anything.
 */
void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
	pmap_t		dst_pmap;
	pmap_t		src_pmap;
	vm_offset_t	dst_addr;
	vm_size_t	len;
	vm_offset_t	src_addr;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy(%x, %x, %x, %x, %x)\n",
		       dst_pmap, src_pmap, dst_addr, len, src_addr);
#endif
}

/*
 *	Require that all active physical maps contain no
 *	incorrect entries NOW.  [This update includes
 *	forcing updates of any address map caching.]
 *
 *	Generally used to ensure that a thread about
 *	to run will see a semantically correct world.
 */
void pmap_update()
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_update()\n");
#endif
	TBIA();
}
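
/*
 * A note on the flush primitives used throughout (implemented in
 * locore): the TBI* family invalidates translation buffer (TLB)
 * entries -- TBIS() a single VA, TBIA() all entries, TBIAS()/TBIAU()
 * the supervisor/user side -- while DCIS()/DCIU()/DCIA() invalidate
 * the supervisor/user/entire external virtual address cache (VAC).
 */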

/*
 *	Routine:	pmap_collect
 *	Function:
 *		Garbage collects the physical map system for
 *		pages which are no longer used.
 *		Success need not be guaranteed -- that is, there
 *		may well be pages which are not referenced, but
 *		others may be collected.
 *	Usage:
 *		Called by the pageout daemon when pages are scarce.
 */
void
pmap_collect(pmap)
	pmap_t		pmap;
{
	register vm_offset_t pa;
	register pv_entry_t pv;
	register int *pte;
	vm_offset_t kpa;
	int s;

#ifdef DEBUG
	int *ste;
	int opmapdebug;
#endif
	if (pmap != kernel_pmap)
		return;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_collect(%x)\n", pmap);
	kpt_stats.collectscans++;
#endif
	s = splimp();
	for (pa = vm_first_phys; pa < vm_last_phys; pa += PAGE_SIZE) {
		register struct kpt_page *kpt, **pkpt;

		/*
		 * Locate physical pages which are being used as kernel
		 * page table pages.
		 */
		pv = pa_to_pvh(pa);
		if (pv->pv_pmap != kernel_pmap || !(pv->pv_flags & PV_PTPAGE))
			continue;
		do {
			if (pv->pv_ptste && pv->pv_ptpmap == kernel_pmap)
				break;
		} while (pv = pv->pv_next);
		if (pv == PV_ENTRY_NULL)
			continue;
#ifdef DEBUG
		if (pv->pv_va < (vm_offset_t)Sysmap ||
		    pv->pv_va >= (vm_offset_t)Sysmap + HP_MAX_PTSIZE)
			printf("collect: kernel PT VA out of range\n");
		else
			goto ok;
		pmap_pvdump(pa);
		continue;
ok:
#endif
		pte = (int *)(pv->pv_va + HP_PAGE_SIZE);
		while (--pte >= (int *)pv->pv_va && *pte == PG_NV)
			;
		if (pte >= (int *)pv->pv_va)
			continue;

#ifdef DEBUG
		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
			printf("collect: freeing KPT page at %x (ste %x@%x)\n",
			       pv->pv_va, *(int *)pv->pv_ptste, pv->pv_ptste);
			opmapdebug = pmapdebug;
			pmapdebug |= PDB_PTPAGE;
		}

		ste = (int *)pv->pv_ptste;
#endif
		/*
		 * If all entries were invalid we can remove the page.
		 * We call pmap_remove to take care of invalidating ST
		 * and Sysptmap entries.
		 */
		kpa = pmap_extract(pmap, pv->pv_va);
		pmap_remove(pmap, pv->pv_va, pv->pv_va + HP_PAGE_SIZE);
		/*
		 * Use the physical address to locate the original
		 * (kmem_alloc assigned) address for the page and put
		 * that page back on the free list.
		 */
		for (pkpt = &kpt_used_list, kpt = *pkpt;
		     kpt != (struct kpt_page *)0;
		     pkpt = &kpt->kpt_next, kpt = *pkpt)
			if (kpt->kpt_pa == kpa)
				break;
#ifdef DEBUG
		if (kpt == (struct kpt_page *)0)
			panic("pmap_collect: lost a KPT page");
		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
			printf("collect: %x (%x) to free list\n",
			       kpt->kpt_va, kpa);
#endif
		*pkpt = kpt->kpt_next;
		kpt->kpt_next = kpt_free_list;
		kpt_free_list = kpt;
#ifdef DEBUG
		kpt_stats.kptinuse--;
		kpt_stats.collectpages++;
		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
			pmapdebug = opmapdebug;

		if (*ste)
			printf("collect: kernel STE at %x still valid (%x)\n",
			       ste, *ste);
		ste = (int *)&Sysptmap[(st_entry_t *)ste-pmap_ste(kernel_pmap, 0)];
		if (*ste)
			printf("collect: kernel PTmap at %x still valid (%x)\n",
			       ste, *ste);
#endif
	}
	splx(s);
}
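
/*
 * Only kernel PT pages are reclaimed here (note the early return for
 * pmap != kernel_pmap).  User PT pages live in pt_map and are wired
 * and unwired per-mapping via vm_map_pageable(), so the pageout
 * daemon can reclaim them through the normal paging path once their
 * wire counts drop to zero.
 */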

void
pmap_activate(pmap, pcbp)
	register pmap_t pmap;
	struct pcb *pcbp;
{
#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_SEGTAB))
		printf("pmap_activate(%x, %x)\n", pmap, pcbp);
#endif
	PMAP_ACTIVATE(pmap, pcbp);
}

/*
 *	Routine:	pmap_kernel
 *	Function:
 *		Returns the physical map handle for the kernel.
 */
pmap_t
pmap_kernel()
{
	return (kernel_pmap);
}

/*
 *	pmap_zero_page zeros the specified (machine independent)
 *	page by mapping the page into virtual memory and using
 *	bzero to clear its contents, one machine dependent page
 *	at a time.
 */
pmap_zero_page(phys)
	register vm_offset_t	phys;
{
	register int ix;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_zero_page(%x)\n", phys);
#endif
	phys >>= PG_SHIFT;
	ix = 0;
	do {
		clearseg(phys++);
	} while (++ix != hppagesperpage);
}

/*
 *	pmap_copy_page copies the specified (machine independent)
 *	page by mapping the page into virtual memory and using
 *	bcopy to copy the page, one machine dependent page at a
 *	time.
 */
pmap_copy_page(src, dst)
	register vm_offset_t	src, dst;
{
	register int ix;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy_page(%x, %x)\n", src, dst);
#endif
	src >>= PG_SHIFT;
	dst >>= PG_SHIFT;
	ix = 0;
	do {
		physcopyseg(src++, dst++);
	} while (++ix != hppagesperpage);
}


/*
 *	Routine:	pmap_pageable
 *	Function:
 *		Make the specified pages (by pmap, offset)
 *		pageable (or not) as requested.
 *
 *		A page which is not pageable may not take
 *		a fault; therefore, its page table entry
 *		must remain valid for the duration.
 *
 *		This routine is merely advisory; pmap_enter
 *		will specify that these pages are to be wired
 *		down (or not) as appropriate.
 */
pmap_pageable(pmap, sva, eva, pageable)
	pmap_t		pmap;
	vm_offset_t	sva, eva;
	boolean_t	pageable;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_pageable(%x, %x, %x, %x)\n",
		       pmap, sva, eva, pageable);
#endif
	/*
	 * If we are making a PT page pageable then all valid
	 * mappings must be gone from that page.  Hence it should
	 * be all zeros and there is no need to clean it.
	 * Assumptions:
	 *	- we are called with only one page at a time
	 *	- PT pages have only one pv_table entry
	 */
	if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
		register pv_entry_t pv;
		register vm_offset_t pa;

#ifdef DEBUG
		if ((pmapdebug & (PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE)
			printf("pmap_pageable(%x, %x, %x, %x)\n",
			       pmap, sva, eva, pageable);
#endif
		if (!pmap_ste_v(pmap_ste(pmap, sva)))
			return;
		pa = pmap_pte_pa(pmap_pte(pmap, sva));
		if (pa < vm_first_phys || pa >= vm_last_phys)
			return;
		pv = pa_to_pvh(pa);
		if (pv->pv_ptste == ST_ENTRY_NULL)
			return;
#ifdef DEBUG
		if (pv->pv_va != sva || pv->pv_next) {
			printf("pmap_pageable: bad PT page va %x next %x\n",
			       pv->pv_va, pv->pv_next);
			return;
		}
#endif
		/*
		 * Mark it unmodified to avoid pageout
		 */
		pmap_clear_modify(pa);
#ifdef DEBUG
		if (pmapdebug & PDB_PTPAGE)
			printf("pmap_pageable: PT page %x(%x) unmodified\n",
			       sva, *(int *)pmap_pte(pmap, sva));
		if (pmapdebug & PDB_WIRING)
			pmap_check_wiring("pageable", sva);
#endif
	}
}

/*
 *	Clear the modify bits on the specified physical page.
 */

void
pmap_clear_modify(pa)
	vm_offset_t	pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_modify(%x)\n", pa);
#endif
	pmap_changebit(pa, PG_M, FALSE);
}

/*
 *	pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */

void pmap_clear_reference(pa)
	vm_offset_t	pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_reference(%x)\n", pa);
#endif
	pmap_changebit(pa, PG_U, FALSE);
}

/*
 *	pmap_is_referenced:
 *
 *	Return whether or not the specified physical page is referenced
 *	by any physical maps.
 */

boolean_t
pmap_is_referenced(pa)
	vm_offset_t	pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW) {
		boolean_t rv = pmap_testbit(pa, PG_U);
		printf("pmap_is_referenced(%x) -> %c\n", pa, "FT"[rv]);
		return(rv);
	}
#endif
	return(pmap_testbit(pa, PG_U));
}
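
/*
 * Reference/modify bookkeeping: the hardware keeps PG_U/PG_M in the
 * PTEs, and pmap_remove() folds those bits into pmap_attributes[]
 * before a mapping disappears.  pmap_testbit() therefore checks the
 * saved attributes first and falls back to scanning the live PTEs of
 * every current mapping; pmap_changebit() with setem == FALSE clears
 * both the saved and the live copies.
 */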

boolean_t
pmap_is_modified(pa)
	vm_offset_t	pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW) {
		boolean_t rv = pmap_testbit(pa, PG_M);
		printf("pmap_is_modified(%x) -> %c\n", pa, "FT"[rv]);
		return(rv);
	}
#endif
	return(pmap_testbit(pa, PG_M));
}

vm_offset_t
pmap_phys_address(ppn)
	int ppn;
{
	return(hp300_ptob(ppn));
}

/*
 * Miscellaneous support routines follow
 */

/* static */
hp300_protection_init()
{
	register int *kp, prot;

	kp = protection_codes;
	for (prot = 0; prot < 8; prot++) {
		switch (prot) {
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
			*kp++ = 0;
			break;
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
			*kp++ = PG_RO;
			break;
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
			*kp++ = PG_RW;
			break;
		}
	}
}

/* static */
boolean_t
pmap_testbit(pa, bit)
	register vm_offset_t pa;
	int bit;
{
	register pv_entry_t pv;
	register int *pte, ix;
	int s;

	if (pa < vm_first_phys || pa >= vm_last_phys)
		return(FALSE);

	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * Check saved info first
	 */
	if (pmap_attributes[pa_index(pa)] & bit) {
		splx(s);
		return(TRUE);
	}
	/*
	 * Flush VAC to get correct state of any hardware maintained bits.
	 */
	if (pmap_aliasmask && (bit & (PG_U|PG_M)))
		DCIS();
	/*
	 * Not found, check current mappings returning
	 * immediately if found.
	 */
	if (pv->pv_pmap != PMAP_NULL) {
		for (; pv; pv = pv->pv_next) {
			pte = (int *) pmap_pte(pv->pv_pmap, pv->pv_va);
			ix = 0;
			do {
				if (*pte++ & bit) {
					splx(s);
					return(TRUE);
				}
			} while (++ix != hppagesperpage);
		}
	}
	splx(s);
	return(FALSE);
}

/* static */
pmap_changebit(pa, bit, setem)
	register vm_offset_t pa;
	int bit;
	boolean_t setem;
{
	register pv_entry_t pv;
	register int *pte, npte, ix;
	vm_offset_t va;
	int s;
	boolean_t firstpage = TRUE;

#ifdef DEBUG
	if (pmapdebug & PDB_BITS)
		printf("pmap_changebit(%x, %x, %s)\n",
		       pa, bit, setem ? "set" : "clear");
#endif
	if (pa < vm_first_phys || pa >= vm_last_phys)
		return;

	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * Clear saved attributes (modify, reference)
	 */
	if (!setem)
		pmap_attributes[pa_index(pa)] &= ~bit;
	/*
	 * Loop over all current mappings setting/clearing as apropos
	 * If setting RO do we need to clear the VAC?
	 */
	if (pv->pv_pmap != PMAP_NULL) {
#ifdef DEBUG
		int toflush = 0;
#endif
		for (; pv; pv = pv->pv_next) {
#ifdef DEBUG
			toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1;
#endif
			va = pv->pv_va;
			pte = (int *) pmap_pte(pv->pv_pmap, va);
			/*
			 * Flush VAC to ensure we get correct state of HW bits
			 * so we don't clobber them.
			 */
			if (firstpage && pmap_aliasmask) {
				firstpage = FALSE;
				DCIS();
			}
			ix = 0;
			do {
				if (setem)
					npte = *pte | bit;
				else
					npte = *pte & ~bit;
				if (*pte != npte) {
					*pte = npte;
					TBIS(va);
				}
				va += HP_PAGE_SIZE;
				pte++;
			} while (++ix != hppagesperpage);
		}
#ifdef DEBUG
		if (setem && bit == PG_RO && (pmapvacflush & PVF_PROTECT)) {
			if ((pmapvacflush & PVF_TOTAL) || toflush == 3)
				DCIA();
			else if (toflush == 2)
				DCIS();
			else
				DCIU();
		}
#endif
	}
	splx(s);
}

/* static */
void
pmap_enter_ptpage(pmap, va)
	register pmap_t pmap;
	register vm_offset_t va;
{
	register vm_offset_t ptpa;
	register pv_entry_t pv;
	st_entry_t *ste;
	int s;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE))
		printf("pmap_enter_ptpage: pmap %x, va %x\n", pmap, va);
	enter_stats.ptpneeded++;
#endif
	/*
	 * Allocate a segment table if necessary.  Note that it is allocated
	 * from kernel_map and not pt_map.  This keeps user page tables
	 * aligned on segment boundaries in the kernel address space.
	 * The segment table is wired down.  It will be freed whenever the
	 * reference count drops to zero.
	 */
	if (pmap->pm_stab == Segtabzero) {
		pmap->pm_stab = (st_entry_t *)
			kmem_alloc(kernel_map, HP_STSIZE);
		pmap->pm_stchanged = TRUE;
		/*
		 * XXX may have changed segment table pointer for current
		 * process so update now to reload hardware.
		 */
		if (pmap == u.u_procp->p_map->pmap)
			PMAP_ACTIVATE(pmap, (struct pcb *)u.u_procp->p_addr);
#ifdef DEBUG
		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
			printf("enter: pmap %x stab %x\n",
			       pmap, pmap->pm_stab);
#endif
	}

	ste = pmap_ste(pmap, va);
	va = trunc_page((vm_offset_t)pmap_pte(pmap, va));

	/*
	 * In the kernel we allocate a page from the kernel PT page
	 * free list and map it into the kernel page table map (via
	 * pmap_enter).
	 */
	if (pmap == kernel_pmap) {
		register struct kpt_page *kpt;

		s = splimp();
		if ((kpt = kpt_free_list) == (struct kpt_page *)0) {
			/*
			 * No PT pages available.
			 * Try once to free up unused ones.
			 */
#ifdef DEBUG
			if (pmapdebug & PDB_COLLECT)
				printf("enter: no KPT pages, collecting...\n");
#endif
			pmap_collect(kernel_pmap);
			if ((kpt = kpt_free_list) == (struct kpt_page *)0)
				panic("pmap_enter_ptpage: can't get KPT page");
		}
#ifdef DEBUG
		if (++kpt_stats.kptinuse > kpt_stats.kptmaxuse)
			kpt_stats.kptmaxuse = kpt_stats.kptinuse;
#endif
		kpt_free_list = kpt->kpt_next;
		kpt->kpt_next = kpt_used_list;
		kpt_used_list = kpt;
		ptpa = kpt->kpt_pa;
		bzero(kpt->kpt_va, HP_PAGE_SIZE);
		pmap_enter(pmap, va, ptpa, VM_PROT_DEFAULT, TRUE);
#ifdef DEBUG
		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
			printf("enter: add &Sysptmap[%d]: %x (KPT page %x)\n",
			       ste - pmap_ste(pmap, 0),
			       *(int *)&Sysptmap[ste - pmap_ste(pmap, 0)],
			       kpt->kpt_va);
#endif
		splx(s);
	}
	/*
	 * For user processes we just simulate a fault on that location
	 * letting the VM system allocate a zero-filled page.
	 */
	else {
#ifdef DEBUG
		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
			printf("enter: about to fault UPT pg at %x\n", va);
#endif
		if (vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE)
		    != KERN_SUCCESS)
			panic("pmap_enter: vm_fault failed");
		ptpa = pmap_extract(kernel_pmap, va);
#ifdef DEBUG
		PHYS_TO_VM_PAGE(ptpa)->ptpage = TRUE;
#endif
	}

	/*
	 * Locate the PV entry in the kernel for this PT page and
	 * record the STE address.  This is so that we can invalidate
	 * the STE when we remove the mapping for the page.
	 */
	pv = pa_to_pvh(ptpa);
	s = splimp();
	if (pv) {
		pv->pv_flags |= PV_PTPAGE;
		do {
			if (pv->pv_pmap == kernel_pmap && pv->pv_va == va)
				break;
		} while (pv = pv->pv_next);
	}
#ifdef DEBUG
	if (pv == PV_ENTRY_NULL)
		panic("pmap_enter_ptpage: PT page not entered");
#endif
	pv->pv_ptste = ste;
	pv->pv_ptpmap = pmap;
#ifdef DEBUG
	if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
		printf("enter: new PT page at PA %x, ste at %x\n", ptpa, ste);
#endif

	/*
	 * Map the new PT page into the segment table.
	 * Also increment the reference count on the segment table if this
	 * was a user page table page.  Note that we don't use vm_map_pageable
	 * to keep the count like we do for PT pages, this is mostly because
	 * it would be difficult to identify ST pages in pmap_pageable to
	 * release them.  We also avoid the overhead of vm_map_pageable.
	 */
	*(int *)ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
	if (pmap != kernel_pmap) {
		pmap->pm_sref++;
#ifdef DEBUG
		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
			printf("enter: stab %x refcnt %d\n",
			       pmap->pm_stab, pmap->pm_sref);
#endif
	}
	/*
	 * Flush stale TLB info.
	 */
	if (pmap == kernel_pmap)
		TBIAS();
	else
		TBIAU();
	pmap->pm_ptpages++;
	splx(s);
}

#ifdef DEBUG
pmap_pvdump(pa)
	vm_offset_t pa;
{
	register pv_entry_t pv;

	printf("pa %x", pa);
	for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next)
		printf(" -> pmap %x, va %x, ptste %x, ptpmap %x, flags %x",
		       pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap,
		       pv->pv_flags);
	printf("\n");
}

pmap_check_wiring(str, va)
	char *str;
	vm_offset_t va;
{
	vm_map_entry_t entry;
	register int count, *pte;

	va = trunc_page(va);
	if (!pmap_ste_v(pmap_ste(kernel_pmap, va)) ||
	    !pmap_pte_v(pmap_pte(kernel_pmap, va)))
		return;

	if (!vm_map_lookup_entry(pt_map, va, &entry)) {
		printf("wired_check: entry for %x not found\n", va);
		return;
	}
	count = 0;
	for (pte = (int *)va; pte < (int *)(va+PAGE_SIZE); pte++)
		if (*pte)
			count++;
	if (entry->wired_count != count)
		printf("*%s*: %x: w%d/a%d\n",
		       str, va, entry->wired_count, count);
}
#endif