/*
 * Copyright (c) 1992 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and Ralph Campbell.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)pmap.c	7.4 (Berkeley) 03/14/92
 */

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this
 * module is called upon to provide software-use-only
 * maps which may or may not be stored in the same
 * form as hardware maps.  These pseudo-maps are
 * used to store intermediate results from copy
 * operations to and from address spaces.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time.  However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidation or protection-reduction
 * operations until such time as they are actually
 * necessary.  This module is given full information as
 * to which processors are currently using which maps,
 * and as to when physical maps must be made correct.
 */

#include "param.h"
#include "proc.h"
#include "malloc.h"
#include "user.h"

#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "vm/vm_page.h"

#include "../include/machConst.h"
#include "../include/pte.h"

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 * XXX really should do this as a part of the higher level code.
 */
typedef struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
} *pv_entry_t;

pv_entry_t	pv_table;	/* array of entries, one per page */
extern void	pmap_remove_pv();

#define pa_index(pa)		atop((pa) - first_phys_addr)
#define pa_to_pvh(pa)		(&pv_table[pa_index(pa)])
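
/*
 * For example, if a physical page pa is mapped at va 0x1000 in pmap A
 * and at va 0x2000 in pmap B, pa_to_pvh(pa) is the header entry
 * {A, 0x1000} and its pv_next points at a malloc'ed entry {B, 0x2000}
 * (see pmap_enter() and pmap_remove_pv() below).
 */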

#ifdef DEBUG
struct {
	int kernel;	/* entering kernel mapping */
	int user;	/* entering user mapping */
	int ptpneeded;	/* needed to allocate a PT page */
	int pwchange;	/* no mapping change, just wiring or protection */
	int wchange;	/* no mapping change, just wiring */
	int mchange;	/* was mapped but mapping to different page */
	int managed;	/* a managed page */
	int firstpv;	/* first mapping for this PA */
	int secondpv;	/* second mapping for this PA */
	int ci;		/* cache inhibited */
	int unmanaged;	/* not a managed page */
	int flushes;	/* cache flushes */
	int cachehit;	/* new entry forced valid entry out */
} enter_stats;
struct {
	int calls;
	int removes;
	int flushes;
	int pidflushes;	/* HW pid stolen */
	int pvfirst;
	int pvsearch;
} remove_stats;

int pmapdebug;
#define PDB_FOLLOW	0x0001
#define PDB_INIT	0x0002
#define PDB_ENTER	0x0004
#define PDB_REMOVE	0x0008
#define PDB_CREATE	0x0010
#define PDB_PTPAGE	0x0020
#define PDB_CACHE	0x0040
#define PDB_BITS	0x0080
#define PDB_COLLECT	0x0100
#define PDB_PROTECT	0x0200
#define PDB_TLBPID	0x0400
#define PDB_PARANOIA	0x2000
#define PDB_WIRING	0x4000
#define PDB_PVDUMP	0x8000

#endif /* DEBUG */

u_int	whichpids[2] = {	/* bit mask of hardware PIDs in use */
	3, 0			/* PIDs 0 and 1 are reserved at boot */
};

struct pmap	kernel_pmap_store;
pmap_t		cur_pmap;	/* current pmap mapped in hardware */

vm_offset_t	avail_start;	/* PA of first available physical page */
vm_offset_t	avail_end;	/* PA of last available physical page */
vm_size_t	mem_size;	/* memory size in bytes */
vm_offset_t	virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
int		pmaxpagesperpage; /* PAGE_SIZE / NBPG */
#ifdef ATTR
char		*pmap_attributes; /* reference and modify bits */
#endif
pmap_hash_t	zero_pmap_hash;	/* empty TLB hash table for init */

/*
 * Bootstrap the system enough to run with virtual memory.
 */
void
pmap_bootstrap(firstaddr)
	vm_offset_t firstaddr;
{
	register int i;
	vm_offset_t start = firstaddr;
	extern int maxmem, physmem;

	/*
	 * Allocate a TLB hash table for the kernel.
	 * This could be a KSEG0 address and thus save TLB entries, but
	 * it is faster and simpler in assembly language to have a
	 * fixed address that can be accessed with a 16-bit signed offset.
	 * Note: the kernel pm_hash field is null; user pm_hash fields are
	 * either the table or zero_pmap_hash.
	 */
	kernel_pmap_store.pm_hash = (pmap_hash_t)0;
	for (i = 0; i < PMAP_HASH_KPAGES; i++) {
		MachTLBWriteIndexed(i + UPAGES + PMAP_HASH_UPAGES,
			PMAP_HASH_KADDR + (i << PGSHIFT),
			firstaddr | PG_V | PG_M | PG_G);
		firstaddr += NBPG;
	}
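
	/*
	 * zero_pmap_hash below is a shared, read-only, all-invalid hash
	 * table.  Every pmap starts out pointing at it (see pmap_pinit()),
	 * so an address space pays for a private PMAP_HASH_SIZE table only
	 * when pmap_enter() first installs a mapping and replaces the
	 * shared one.
	 */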

	/*
	 * Allocate an empty TLB hash table for initial pmaps.
	 */
	zero_pmap_hash = (pmap_hash_t)MACH_PHYS_TO_CACHED(firstaddr);

	/* init proc[0]'s pmap hash table */
	for (i = 0; i < PMAP_HASH_UPAGES; i++) {
		kernel_pmap_store.pm_hash_ptes[i] = firstaddr | PG_V | PG_RO;
		MachTLBWriteIndexed(i + UPAGES,
			(PMAP_HASH_UADDR + (i << PGSHIFT)) |
			(1 << VMMACH_TLB_PID_SHIFT),
			kernel_pmap_store.pm_hash_ptes[i]);
		firstaddr += NBPG;
	}

	/*
	 * Allocate memory for pv_table.
	 * This will allocate more entries than we really need.
	 * We should do this in pmap_init when we know the actual
	 * phys_start and phys_end, but it is better to use physical
	 * addresses rather than kernel virtual addresses mapped through
	 * the TLB.
	 */
	i = (maxmem - pmax_btop(firstaddr)) * sizeof(struct pv_entry);
	i = pmax_round_page(i);
	pv_table = (pv_entry_t)MACH_PHYS_TO_CACHED(firstaddr);
	firstaddr += i;

	/*
	 * Clear allocated memory.
	 */
	bzero((caddr_t)MACH_PHYS_TO_CACHED(start), firstaddr - start);

	avail_start = firstaddr;
	avail_end = pmax_ptob(maxmem);
	mem_size = avail_end - avail_start;

	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES * NPTEPG * NBPG;
	/* XXX need to decide how to set cnt.v_page_size */
	pmaxpagesperpage = 1;

	cur_pmap = &kernel_pmap_store;
	simple_lock_init(&kernel_pmap_store.pm_lock);
	kernel_pmap_store.pm_count = 1;
}

/*
 * Bootstrap memory allocator.  This function allows for early dynamic
 * memory allocation until the virtual memory system has been bootstrapped.
 * After that point, either kmem_alloc or malloc should be used.  This
 * function works by stealing pages from the (to be) managed page pool,
 * stealing virtual address space, then mapping the pages and zeroing them.
 *
 * It should be used from pmap_bootstrap until vm_page_startup; afterwards
 * it cannot be used, and will generate a panic if tried.  Note that this
 * memory will never be freed, and in essence it is wired down.
 */
void *
pmap_bootstrap_alloc(size)
	int size;
{
	vm_offset_t val;
	extern boolean_t vm_page_startup_initialized;

	if (vm_page_startup_initialized)
		panic("pmap_bootstrap_alloc: called after startup initialized");

	val = MACH_PHYS_TO_CACHED(avail_start);
	size = round_page(size);
	avail_start += size;

	blkclr((caddr_t)val, size);
	return ((void *)val);
}

/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 */
void
pmap_init(phys_start, phys_end)
	vm_offset_t phys_start, phys_end;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
#endif
}

/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * This routine should only be called by vm_page_startup()
 * with KSEG0 addresses.
 */
vm_offset_t
pmap_map(virt, start, end, prot)
	vm_offset_t virt;
	vm_offset_t start;
	vm_offset_t end;
	int prot;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
#endif

	return (round_page(end));
}
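
/*
 * pmap_map() can be a no-op here because vm_page_startup() hands it
 * KSEG0 addresses, which the MIPS hardware maps without TLB entries.
 * Callers needing early memory use pmap_bootstrap_alloc() above, along
 * the lines of this (hypothetical) sketch:
 *
 *	nbytes = npages * sizeof(struct vm_page);
 *	m = (struct vm_page *)pmap_bootstrap_alloc(nbytes);
 */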

/*
 * Create and return a physical map.
 *
 * If the size specified for the map
 * is zero, the map is an actual physical
 * map, and may be referenced by the
 * hardware.
 *
 * If the size specified is non-zero,
 * the map will be used in software only, and
 * is bounded by that size.
 */
pmap_t
pmap_create(size)
	vm_size_t size;
{
	register pmap_t pmap;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_create(%x)\n", size);
#endif
	/*
	 * A software use-only map doesn't even need a pmap structure.
	 */
	if (size)
		return (NULL);

	printf("pmap_create(%x) XXX\n", size);	/* XXX */
	/* XXX: is it ok to wait here? */
	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
#ifdef notifwewait
	if (pmap == NULL)
		panic("pmap_create: cannot allocate a pmap");
#endif
	bzero(pmap, sizeof(*pmap));
	pmap_pinit(pmap);
	return (pmap);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
void
pmap_pinit(pmap)
	register struct pmap *pmap;
{
	register int i;
	extern struct vmspace vmspace0;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_pinit(%x)\n", pmap);
#endif
	simple_lock_init(&pmap->pm_lock);
	pmap->pm_count = 1;
	pmap->pm_flags = 0;
	pmap->pm_hash = zero_pmap_hash;
	for (i = 0; i < PMAP_HASH_UPAGES; i++)
		pmap->pm_hash_ptes[i] =
			(MACH_CACHED_TO_PHYS(zero_pmap_hash) + (i << PGSHIFT)) |
			PG_V | PG_RO;
	if (pmap == &vmspace0.vm_pmap)
		pmap->pm_tlbpid = 1;	/* preallocated in mach_init() */
	else
		pmap->pm_tlbpid = -1;	/* none allocated yet */
}

/*
 * Retire the given physical map from service.
 * Should only be called if the map contains
 * no valid mappings.
 */
void
pmap_destroy(pmap)
	register pmap_t pmap;
{
	int count;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_destroy(%x)\n", pmap);
#endif
	if (pmap == NULL)
		return;

	printf("pmap_destroy(%x) XXX\n", pmap);	/* XXX */
	simple_lock(&pmap->pm_lock);
	count = --pmap->pm_count;
	simple_unlock(&pmap->pm_lock);
	if (count == 0) {
		pmap_release(pmap);
		free((caddr_t)pmap, M_VMPMAP);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap)
	register pmap_t pmap;
{
	register int id;
#ifdef DIAGNOSTIC
	register int i;
#endif

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_release(%x)\n", pmap);
#endif

	if (pmap->pm_hash && pmap->pm_hash != zero_pmap_hash) {
		kmem_free(kernel_map, (vm_offset_t)pmap->pm_hash,
			PMAP_HASH_SIZE);
		pmap->pm_hash = zero_pmap_hash;
	}
	if ((id = pmap->pm_tlbpid) < 0)
		return;
#ifdef DIAGNOSTIC
	if (!(whichpids[id >> 5] & (1 << (id & 0x1F))))
		panic("pmap_release: id free");
#endif
	MachTLBFlushPID(id);
	whichpids[id >> 5] &= ~(1 << (id & 0x1F));
	pmap->pm_flags &= ~PM_MODIFIED;
	pmap->pm_tlbpid = -1;
	if (pmap == cur_pmap)
		cur_pmap = (pmap_t)0;
#ifdef DIAGNOSTIC
	/* invalidate user PTE cache */
	for (i = 0; i < PMAP_HASH_UPAGES; i++)
		MachTLBWriteIndexed(i + UPAGES, MACH_RESERVED_ADDR, 0);
#endif
}
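
/*
 * whichpids[] above is kept as a 64-bit map: TLB PID id lives in bit
 * (id & 0x1F) of word (id >> 5).  For example, releasing id 37 clears
 * bit 5 of whichpids[1]; pmap_alloc_tlbpid() sets bits the same way.
 */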

/*
 * Add a reference to the specified pmap.
 */
void
pmap_reference(pmap)
	pmap_t pmap;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_reference(%x)\n", pmap);
#endif
	if (pmap != NULL) {
		simple_lock(&pmap->pm_lock);
		pmap->pm_count++;
		simple_unlock(&pmap->pm_lock);
	}
}

/*
 * Remove the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly
 * rounded to the page size.
 */
void
pmap_remove(pmap, sva, eva)
	register pmap_t pmap;
	vm_offset_t sva, eva;
{
	register vm_offset_t va;
	pmap_hash_t hp;
	unsigned entry;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
		printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
	remove_stats.calls++;
#endif
	if (pmap == NULL)
		return;

	/* anything in the cache? */
	if (pmap->pm_tlbpid < 0 || pmap->pm_hash == zero_pmap_hash)
		return;

	if (!pmap->pm_hash) {
		register pt_entry_t *pte;

		/* remove entries from kernel pmap */
#ifdef DIAGNOSTIC
		if (sva < VM_MIN_KERNEL_ADDRESS ||
		    eva > VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
			panic("pmap_remove");
#endif
		pte = kvtopte(sva);
		for (va = sva; va < eva; va += NBPG, pte++) {
			entry = pte->pt_entry;
			if (!(entry & PG_V))
				continue;
			if (entry & PG_WIRED)
				pmap->pm_stats.wired_count--;
			pmap->pm_stats.resident_count--;
			pmap_remove_pv(pmap, va, entry & PG_FRAME);
#ifdef ATTR
			pmap_attributes[atop(entry - KERNBASE)] = 0;
#endif
			pte->pt_entry = PG_NV;
			/*
			 * Flush the TLB for the given address.
			 */
			MachTLBFlushAddr(va);
#ifdef DEBUG
			remove_stats.flushes++;
#endif
		}
		return;
	}

	va = sva | (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
	eva |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
	/*
	 * If we are not in the current address space, just flush the
	 * software cache and not the hardware.
	 */
	if (pmap != cur_pmap) {
		for (; va < eva; va += NBPG) {
			hp = &pmap->pm_hash[PMAP_HASH(va)];
			if (hp->high != va)
				continue;

			hp->high = 0;
			entry = hp->low;
			if (entry & PG_WIRED)
				pmap->pm_stats.wired_count--;
			pmap->pm_stats.resident_count--;
			pmap_remove_pv(pmap, va & PG_FRAME, entry & PG_FRAME);
#ifdef ATTR
			pmap_attributes[atop(entry - KERNBASE)] = 0;
#endif
			pmap->pm_flags |= PM_MODIFIED;
#ifdef DEBUG
			remove_stats.removes++;
#endif
		}
		return;
	}

	for (; va < eva; va += NBPG) {
		hp = &pmap->pm_hash[PMAP_HASH(va)];
		if (hp->high != va)
			continue;

		hp->high = 0;
		entry = hp->low;
		if (entry & PG_WIRED)
			pmap->pm_stats.wired_count--;
		pmap->pm_stats.resident_count--;
		pmap_remove_pv(pmap, va & PG_FRAME, entry & PG_FRAME);
#ifdef ATTR
		pmap_attributes[atop(entry - KERNBASE)] = 0;
#endif
		/*
		 * Flush the TLB for the given address.
		 */
		MachTLBFlushAddr(va);
#ifdef DEBUG
		remove_stats.flushes++;
#endif
	}
}
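
/*
 * Note that the hash lookups above compare hp->high against va with
 * the TLB PID OR'ed into the page-offset bits, mirroring the hardware
 * EntryHi register, so a single comparison checks both the virtual
 * page number and the address-space PID.
 */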

/*
 * pmap_page_protect:
 *
 *	Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(pa, prot)
	vm_offset_t pa;
	vm_prot_t prot;
{
	register pv_entry_t pv;
	register vm_offset_t va;
	int s;

#ifdef DEBUG
	if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
	    (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
		printf("pmap_page_protect(%x, %x)\n", pa, prot);
#endif
	if (!IS_VM_PHYSADDR(pa))
		return;

	switch (prot) {
	case VM_PROT_ALL:
		break;

	/* copy_on_write */
	case VM_PROT_READ:
	case VM_PROT_READ|VM_PROT_EXECUTE:
		pv = pa_to_pvh(pa);
		s = splimp();
		/*
		 * Loop over all current mappings, adjusting each as
		 * appropriate.
		 */
		if (pv->pv_pmap != NULL) {
			for (; pv; pv = pv->pv_next) {
				extern vm_offset_t pager_sva, pager_eva;

				va = pv->pv_va;

				/*
				 * XXX don't write protect pager mappings
				 */
				if (va >= pager_sva && va < pager_eva)
					continue;
				pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
					prot);
			}
		}
		splx(s);
		break;

	/* remove_all */
	default:
		pv = pa_to_pvh(pa);
		s = splimp();
		while (pv->pv_pmap != NULL) {
			pmap_remove(pv->pv_pmap, pv->pv_va,
				pv->pv_va + PAGE_SIZE);
		}
		splx(s);
	}
}
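
/*
 * The remove_all loop above looks like it could spin, but each
 * pmap_remove() call ends in pmap_remove_pv(), which copies the next
 * chain entry into the pv_table header (or clears pv_pmap), so every
 * iteration consumes the mapping it just examined.
 */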

/*
 * Set the physical protection on the
 * specified range of this map as requested.
 */
void
pmap_protect(pmap, sva, eva, prot)
	register pmap_t pmap;
	vm_offset_t sva, eva;
	vm_prot_t prot;
{
	register vm_offset_t va;
	pmap_hash_t hp;
	u_int p;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
		printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
#endif
	if (pmap == NULL)
		return;

	/* anything in the software cache? */
	if (pmap->pm_tlbpid < 0 || pmap->pm_hash == zero_pmap_hash)
		return;

	if (!(prot & VM_PROT_READ)) {
		pmap_remove(pmap, sva, eva);
		return;
	}

	if (!pmap->pm_hash) {
		register pt_entry_t *pte;

		/*
		 * Change entries in kernel pmap.
		 * This will trap if the page is writable (in order to set
		 * the dirty bit) even if the dirty bit is already set.  The
		 * optimization isn't worth the effort since this code isn't
		 * executed much.  The common case is to make a user page
		 * read-only.
		 */
#ifdef DIAGNOSTIC
		if (sva < VM_MIN_KERNEL_ADDRESS ||
		    eva > VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
			panic("pmap_protect");
#endif
		p = (prot & VM_PROT_WRITE) ? PG_RW : PG_RO;
		pte = kvtopte(sva);
		for (va = sva; va < eva; va += NBPG, pte++) {
			if (!(pte->pt_entry & PG_V))
				continue;
			pte->pt_entry = (pte->pt_entry & ~(PG_M | PG_RO)) | p;
			/*
			 * Update the TLB if the given address is in the cache.
			 */
			MachTLBUpdate(va, pte->pt_entry);
		}
		return;
	}

	p = (prot & VM_PROT_WRITE) ? PG_RW : PG_RO;
	va = sva | (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
	eva |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
	/*
	 * If we are not in the current address space, just update the
	 * software cache and not the hardware.
	 */
	if (pmap != cur_pmap) {
		for (; va < eva; va += NBPG) {
			hp = &pmap->pm_hash[PMAP_HASH(va)];
			if (hp->high != va)
				continue;

			hp->low = (hp->low & ~(PG_M | PG_RO)) | p;
			pmap->pm_flags |= PM_MODIFIED;
		}
		return;
	}

	for (; va < eva; va += NBPG) {
		hp = &pmap->pm_hash[PMAP_HASH(va)];
		if (hp->high != va)
			continue;

		hp->low = (hp->low & ~(PG_M | PG_RO)) | p;
		/*
		 * Update the TLB if the given address is in the cache.
		 */
		MachTLBUpdate(hp->high, hp->low);
	}
}
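
/*
 * Both cases above clear PG_M along with the old protection, so after,
 * e.g., the pmap_page_protect(pa, VM_PROT_READ) done for copy-on-write,
 * the next write to the page traps and the VM system sees the
 * modification (see the comment in the kernel-pmap case).
 */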

/*
 * Insert the given physical page (pa) at
 * the specified virtual address (va) in the
 * target physical map with the protection requested.
 *
 * If specified, the page will be wired down, meaning
 * that the related pte cannot be reclaimed.
 *
 * NB:  This is the only routine which MAY NOT lazy-evaluate
 * or lose information.  That is, this routine must actually
 * insert this page into the given map NOW.
 */
void
pmap_enter(pmap, va, pa, prot, wired)
	register pmap_t pmap;
	vm_offset_t va;
	register vm_offset_t pa;
	vm_prot_t prot;
	boolean_t wired;
{
	register pmap_hash_t hp;
	register u_int npte;
	register int i;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
		printf("pmap_enter(%x, %x, %x, %x, %x)\n",
		       pmap, va, pa, prot, wired);
#endif
#ifdef DIAGNOSTIC
	if (!pmap)
		panic("pmap_enter: pmap");
	if (pmap->pm_tlbpid < 0)
		panic("pmap_enter: tlbpid");
	if (!pmap->pm_hash) {
		enter_stats.kernel++;
		if (va < VM_MIN_KERNEL_ADDRESS ||
		    va >= VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
			panic("pmap_enter: kva");
	} else {
		enter_stats.user++;
		if (va & 0x80000000)
			panic("pmap_enter: uva");
	}
	if (pa & 0x80000000)
		panic("pmap_enter: pa");
	if (!(prot & VM_PROT_READ))
		panic("pmap_enter: prot");
#endif

	/*
	 * See if we need to create a new TLB cache.
	 */
	if (pmap->pm_hash == zero_pmap_hash) {
		register vm_offset_t kva;
		register pt_entry_t *pte;

		kva = kmem_alloc(kernel_map, PMAP_HASH_SIZE);
		pmap->pm_hash = (pmap_hash_t)kva;

		/*
		 * Convert the kernel virtual address to a physical one
		 * and cache it in the pmap.  Note: if the physical address
		 * can change (due to memory compaction in kmem_alloc?),
		 * we will have to update things.
		 */
		pte = kvtopte(kva);
		for (i = 0; i < PMAP_HASH_UPAGES; i++) {
			pmap->pm_hash_ptes[i] = pte->pt_entry & ~PG_G;
			pte++;
		}

		/*
		 * Map in new TLB cache if it is current.
		 */
		if (pmap == cur_pmap) {
			for (i = 0; i < PMAP_HASH_UPAGES; i++) {
				MachTLBWriteIndexed(i + UPAGES,
					(PMAP_HASH_UADDR + (i << PGSHIFT)) |
					(pmap->pm_tlbpid <<
					VMMACH_TLB_PID_SHIFT),
					pmap->pm_hash_ptes[i]);
			}
		}
#ifdef DIAGNOSTIC
		for (i = 0; i < PAGE_SIZE; i += sizeof(int), kva += sizeof(int))
			if (*(int *)kva != 0)
				panic("pmap_enter: *kva != 0");
#endif
	}

	if (IS_VM_PHYSADDR(pa)) {
		register pv_entry_t pv, npv;
		int s;

		if (!(prot & VM_PROT_WRITE))
			npte = PG_RO;
		else {
			register vm_page_t mem;

			mem = PHYS_TO_VM_PAGE(pa);
			if ((int)va < 0) {
				/*
				 * Don't bother to trap on kernel writes,
				 * just record page as dirty.
				 */
				npte = PG_M;
				mem->clean = FALSE;
			} else
#ifdef ATTR
			if ((pmap_attributes[atop(pa - KERNBASE)] &
			    PMAP_ATTR_MOD) || !mem->clean)
#else
			if (!mem->clean)
#endif
				npte = PG_M;
			else
				npte = 0;
		}

#ifdef DEBUG
		enter_stats.managed++;
#endif
		/*
		 * Enter the pmap and virtual address into the
		 * physical to virtual map table.
		 */
		pv = pa_to_pvh(pa);
		s = splimp();
#ifdef DEBUG
		if (pmapdebug & PDB_ENTER)
			printf("pmap_enter: pv %x: was %x/%x/%x\n",
			       pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
#endif
		if (pv->pv_pmap == NULL) {
			/*
			 * No entries yet, use header as the first entry
			 */
#ifdef DEBUG
			enter_stats.firstpv++;
#endif
			pv->pv_va = va;
			pv->pv_pmap = pmap;
			pv->pv_next = NULL;
		} else {
			/*
			 * There is at least one other VA mapping this page.
			 * Place this entry after the header.
			 *
			 * Note: the entry may already be in the table if
			 * we are only changing the protection bits.
			 */
			for (npv = pv; npv; npv = npv->pv_next)
				if (pmap == npv->pv_pmap && va == npv->pv_va) {
#ifdef DIAGNOSTIC
					if (!pmap->pm_hash) {
						unsigned entry;

						entry = kvtopte(va)->pt_entry;
						if (!(entry & PG_V) ||
						    (entry & PG_FRAME) != pa)
							printf("found kva %x pa %x in pv_table but != %x\n",
							       va, pa, entry);
					} else {
						hp = &pmap->pm_hash[PMAP_HASH(va)];
						if (hp->high != (va |
						    (pmap->pm_tlbpid <<
						    VMMACH_TLB_PID_SHIFT)) ||
						    (hp->low & PG_FRAME) != pa)
							printf("found va %x pa %x in pv_table but != %x %x\n",
							       va, pa, hp->high, hp->low);
					}
#endif
					goto fnd;
				}
			/* can this cause us to recurse forever? */
			npv = (pv_entry_t)
				malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
			if (npv == NULL)
				panic("pmap_enter: pv malloc");
			npv->pv_va = va;
			npv->pv_pmap = pmap;
			npv->pv_next = pv->pv_next;
			pv->pv_next = npv;
#ifdef DEBUG
			if (!npv->pv_next)
				enter_stats.secondpv++;
#endif
		fnd:
			;
		}
		splx(s);
	} else {
		/*
		 * Assumption: if it is not part of our managed memory
		 * then it must be device memory which may be volatile.
		 */
#ifdef DEBUG
		enter_stats.unmanaged++;
#endif
		printf("pmap_enter: UNMANAGED ADDRESS va %x pa %x\n",
		       va, pa);	/* XXX */
		npte = (prot & VM_PROT_WRITE) ? PG_M : PG_RO;
	}
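
	/*
	 * At this point npte holds only the protection/dirty bits (PG_RO,
	 * PG_M, or 0); the two cases below OR in the physical frame, PG_V,
	 * and the wired (and, for the kernel, global) bits before
	 * installing the entry.
	 */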

	/*
	 * The only time we need to flush the cache is if we
	 * execute from a physical address and then change the data.
	 * This is the best place to do this.
	 * pmap_protect() and pmap_remove() are mostly used to switch
	 * between R/W and R/O pages.
	 * NOTE: we only support cache flush for read only text.
	 */
#if 0
	if (prot == (VM_PROT_READ | VM_PROT_EXECUTE))
		MachFlushICache(MACH_PHYS_TO_UNCACHED(pa), PAGE_SIZE);
#endif

	if (!pmap->pm_hash) {
		register pt_entry_t *pte;

		/* enter entries into kernel pmap */
		pte = kvtopte(va);
		npte |= pa | PG_V | PG_G;
		if (wired) {
			pmap->pm_stats.wired_count += pmaxpagesperpage;
			npte |= PG_WIRED;
		}
		i = pmaxpagesperpage;
		do {
			if (!(pte->pt_entry & PG_V)) {
				pmap->pm_stats.resident_count++;
				MachTLBWriteRandom(va, npte);
			} else {
#ifdef DIAGNOSTIC
				if (pte->pt_entry & PG_WIRED)
					panic("pmap_enter: kernel wired");
#endif
				/*
				 * Update the same virtual address entry.
				 */
				MachTLBUpdate(va, npte);
				printf("TLB update kva %x pte %x -> %x\n",
				       va, pte->pt_entry, npte);	/* XXX */
			}
			pte->pt_entry = npte;
			va += NBPG;
			npte += NBPG;
			pte++;
		} while (--i != 0);
		return;
	}

	/*
	 * Now validate mapping with desired protection/wiring.
	 * Assume uniform modified and referenced status for all
	 * PMAX pages in a MACH page.
	 */
	npte |= pa | PG_V;
	if (wired) {
		pmap->pm_stats.wired_count += pmaxpagesperpage;
		npte |= PG_WIRED;
	}
#ifdef DEBUG
	if (pmapdebug & PDB_ENTER)
		printf("pmap_enter: new pte value %x\n", npte);
#endif
	va |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
	i = pmaxpagesperpage;
	do {
		hp = &pmap->pm_hash[PMAP_HASH(va)];
		if (!hp->high) {
			pmap->pm_stats.resident_count++;
			hp->high = va;
			hp->low = npte;
			MachTLBWriteRandom(va, npte);
		} else {
#ifdef DEBUG
			enter_stats.cachehit++;
#endif
			if (!(hp->low & PG_WIRED)) {
				if (hp->high == va &&
				    (hp->low & PG_FRAME) == (npte & PG_FRAME)) {
					/*
					 * Update the same entry.
					 */
					hp->low = npte;
					MachTLBUpdate(va, npte);
				} else {
					MachTLBFlushAddr(hp->high);
					pmap_remove_pv(pmap,
						hp->high & PG_FRAME,
						hp->low & PG_FRAME);
					hp->high = va;
					hp->low = npte;
					MachTLBWriteRandom(va, npte);
				}
			} else {
				/*
				 * Don't replace wired entries, just update
				 * the hardware TLB.
				 * Bug: routines to flush the TLB won't know
				 * that the entry is in the hardware.
				 */
				printf("pmap_enter: wired va %x %x\n", va,
				       hp->low);	/* XXX */
				panic("pmap_enter: wired");	/* XXX */
				MachTLBWriteRandom(va, npte);
			}
		}
		va += NBPG;
		npte += NBPG;
	} while (--i != 0);
}
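
/*
 * The do/while loops above install one hardware page per iteration:
 * pmaxpagesperpage is PAGE_SIZE / NBPG, the number of machine pages
 * per machine-independent VM page (currently 1; see pmap_bootstrap()),
 * which is why va and npte each advance by NBPG.
 */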

/*
 * Routine:	pmap_change_wiring
 * Function:	Change the wiring attribute for a map/virtual-address
 *		pair.
 * In/out conditions:
 *		The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap, va, wired)
	register pmap_t pmap;
	vm_offset_t va;
	boolean_t wired;
{
	register pmap_hash_t hp;
	u_int p;
	int i;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_change_wiring(%x, %x, %x)\n", pmap, va, wired);
#endif
	if (pmap == NULL)
		return;

	p = wired ? PG_WIRED : 0;

	/*
	 * Don't need to flush the TLB since PG_WIRED is only in software.
	 */
	if (!pmap->pm_hash) {
		register pt_entry_t *pte;

		/* change entries in kernel pmap */
#ifdef DIAGNOSTIC
		if (va < VM_MIN_KERNEL_ADDRESS ||
		    va >= VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
			panic("pmap_change_wiring");
#endif
		pte = kvtopte(va);
		i = pmaxpagesperpage;
		if (!(pte->pt_entry & PG_WIRED) && p)
			pmap->pm_stats.wired_count += i;
		else if ((pte->pt_entry & PG_WIRED) && !p)
			pmap->pm_stats.wired_count -= i;
		do {
			if (pte->pt_entry & PG_V)
				pte->pt_entry =
					(pte->pt_entry & ~PG_WIRED) | p;
			pte++;
		} while (--i != 0);
	} else if (pmap->pm_tlbpid >= 0 && pmap->pm_hash != zero_pmap_hash) {
		i = pmaxpagesperpage;
		do {
			hp = &pmap->pm_hash[PMAP_HASH(va)];
			if (hp->high) {
				if (!(hp->low & PG_WIRED) && p)
					pmap->pm_stats.wired_count++;
				else if ((hp->low & PG_WIRED) && !p)
					pmap->pm_stats.wired_count--;
				hp->low = (hp->low & ~PG_WIRED) | p;
			}
			va += NBPG;
		} while (--i != 0);
	}
}

/*
 * Routine:	pmap_extract
 * Function:
 *	Extract the physical page address associated
 *	with the given map/virtual_address pair.
 */
vm_offset_t
pmap_extract(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{
	register vm_offset_t pa;
	register pmap_hash_t hp;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_extract(%x, %x) -> ", pmap, va);
#endif

	if (!pmap->pm_hash) {
#ifdef DIAGNOSTIC
		if (va < VM_MIN_KERNEL_ADDRESS ||
		    va >= VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
			panic("pmap_extract");
#endif
		pa = kvtopte(va)->pt_entry & PG_FRAME;
	} else if (pmap->pm_tlbpid >= 0) {
		hp = &pmap->pm_hash[PMAP_HASH(va)];
		if (hp->high == (va |
		    (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT)))
			pa = hp->low & PG_FRAME;
		else
			pa = 0;
	} else
		pa = 0;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("%x\n", pa);
#endif
	return (pa);
}
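
/*
 * pmap_extract() is, in effect, a software TLB probe; for example,
 * pmap_extract(&vmspace0.vm_pmap, va) yields the physical address
 * backing va, or 0 if no mapping is currently cached for it.
 */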

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
	pmap_t dst_pmap;
	pmap_t src_pmap;
	vm_offset_t dst_addr;
	vm_size_t len;
	vm_offset_t src_addr;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy(%x, %x, %x, %x, %x)\n",
		       dst_pmap, src_pmap, dst_addr, len, src_addr);
#endif
}

/*
 * Require that all active physical maps contain no
 * incorrect entries NOW.  [This update includes
 * forcing updates of any address map caching.]
 *
 * Generally used to ensure that a thread about
 * to run will see a semantically correct world.
 */
void
pmap_update()
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_update()\n");
#endif
}

/*
 * Routine:	pmap_collect
 * Function:
 *	Garbage collects the physical map system for
 *	pages which are no longer used.
 *	Success need not be guaranteed -- that is, there
 *	may well be pages which are not referenced, but
 *	others may be collected.
 * Usage:
 *	Called by the pageout daemon when pages are scarce.
 */
void
pmap_collect(pmap)
	pmap_t pmap;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_collect(%x)\n", pmap);
#endif
}

/*
 * pmap_zero_page zeros the specified (machine independent)
 * page.
 */
void
pmap_zero_page(phys)
	vm_offset_t phys;
{
	register int *p, *end;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_zero_page(%x)\n", phys);
#endif
	p = (int *)MACH_PHYS_TO_CACHED(phys);
	end = p + PAGE_SIZE / sizeof(int);
	do {
		p[0] = 0;
		p[1] = 0;
		p[2] = 0;
		p[3] = 0;
		p += 4;
	} while (p != end);
}

/*
 * pmap_copy_page copies the specified (machine independent)
 * page.
 */
void
pmap_copy_page(src, dst)
	vm_offset_t src, dst;
{
	register int *s, *d, *end;
	register int tmp0, tmp1, tmp2, tmp3;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy_page(%x, %x)\n", src, dst);
#endif
	s = (int *)MACH_PHYS_TO_CACHED(src);
	d = (int *)MACH_PHYS_TO_CACHED(dst);
	end = s + PAGE_SIZE / sizeof(int);
	do {
		tmp0 = s[0];
		tmp1 = s[1];
		tmp2 = s[2];
		tmp3 = s[3];
		d[0] = tmp0;
		d[1] = tmp1;
		d[2] = tmp2;
		d[3] = tmp3;
		s += 4;
		d += 4;
	} while (s != end);
}
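
/*
 * Both loops above run through KSEG0 (MACH_PHYS_TO_CACHED), so no
 * mapping is needed, and are unrolled four words per iteration; the
 * separate tmp0..tmp3 temporaries in pmap_copy_page keep the loads
 * ahead of the stores, which suits the MIPS load-delay pipeline.
 */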

/*
 * Routine:	pmap_pageable
 * Function:
 *	Make the specified pages (by pmap, offset)
 *	pageable (or not) as requested.
 *
 *	A page which is not pageable may not take
 *	a fault; therefore, its page table entry
 *	must remain valid for the duration.
 *
 *	This routine is merely advisory; pmap_enter
 *	will specify that these pages are to be wired
 *	down (or not) as appropriate.
 */
void
pmap_pageable(pmap, sva, eva, pageable)
	pmap_t pmap;
	vm_offset_t sva, eva;
	boolean_t pageable;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_pageable(%x, %x, %x, %x)\n",
		       pmap, sva, eva, pageable);
#endif
}

/*
 * Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(pa)
	vm_offset_t pa;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_modify(%x)\n", pa);
#endif
#ifdef ATTR
	pmap_attributes[atop(pa - KERNBASE)] &= ~PMAP_ATTR_MOD;
#endif
}

/*
 * pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(pa)
	vm_offset_t pa;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_reference(%x)\n", pa);
#endif
#ifdef ATTR
	pmap_attributes[atop(pa - KERNBASE)] &= ~PMAP_ATTR_REF;
#endif
}

/*
 * pmap_is_referenced:
 *
 *	Return whether or not the specified physical page is referenced
 *	by any physical maps.
 */
boolean_t
pmap_is_referenced(pa)
	vm_offset_t pa;
{
#ifdef ATTR
	return (pmap_attributes[atop(pa - KERNBASE)] & PMAP_ATTR_REF);
#else
	return (FALSE);
#endif
}

/*
 * pmap_is_modified:
 *
 *	Return whether or not the specified physical page is modified
 *	by any physical maps.
 */
boolean_t
pmap_is_modified(pa)
	vm_offset_t pa;
{
#ifdef ATTR
	return (pmap_attributes[atop(pa - KERNBASE)] & PMAP_ATTR_MOD);
#else
	return (FALSE);
#endif
}

vm_offset_t
pmap_phys_address(ppn)
	int ppn;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_phys_address(%x)\n", ppn);
#endif
	panic("pmap_phys_address");	/* XXX */
	return (pmax_ptob(ppn));
}

/*
 * Miscellaneous support routines
 */

/*
 * Allocate a hardware PID and return it.
 * Also, change the hardwired TLB entries to point to the current TLB cache.
 * This is called by swtch().
 */
int
pmap_alloc_tlbpid(p)
	register struct proc *p;
{
	register pmap_t pmap;
	register u_int i;
	register int id;

	pmap = &p->p_vmspace->vm_pmap;
	if ((id = pmap->pm_tlbpid) >= 0) {
		if (pmap->pm_flags & PM_MODIFIED) {
			pmap->pm_flags &= ~PM_MODIFIED;
			MachTLBFlushPID(id);
		}
		goto done;
	}

	if ((i = whichpids[0]) != 0xFFFFFFFF)
		id = 0;
	else if ((i = whichpids[1]) != 0xFFFFFFFF)
		id = 32;
	else {
		register struct proc *q;
		register pmap_t q_pmap;

		/*
		 * Have to find a tlbpid to recycle.
		 * There is probably a better way to do this.
		 */
		for (q = allproc; q != NULL; q = q->p_nxt) {
			q_pmap = &q->p_vmspace->vm_pmap;
			if ((id = q_pmap->pm_tlbpid) < 0)
				continue;
			if (q->p_stat != SRUN)
				goto fnd;
		}
		if (id < 0)
			panic("TLBPidAlloc");
	fnd:
		printf("pmap_alloc_tlbpid: recycle pid %d (%s) tlbpid %d\n",
		       q->p_pid, q->p_comm, id);	/* XXX */
		/*
		 * Even though the virtual to physical mapping hasn't changed,
		 * we need to clear the PID tag in the high entry of the cache.
		 */
		if (q_pmap->pm_hash != zero_pmap_hash) {
			register pmap_hash_t hp;

			hp = q_pmap->pm_hash;
			for (i = 0; i < PMAP_HASH_NUM_ENTRIES; i++, hp++) {
				if (!hp->high)
					continue;

				if (hp->low & PG_WIRED) {
					printf("Clearing wired user entry! h %x l %x\n",
					       hp->high, hp->low);
					panic("pmap_alloc_tlbpid: wired");
				}
				pmap_remove_pv(q_pmap, hp->high & PG_FRAME,
					hp->low & PG_FRAME);
				hp->high = 0;
				q_pmap->pm_stats.resident_count--;
			}
		}
		q_pmap->pm_tlbpid = -1;
		MachTLBFlushPID(id);
#ifdef DEBUG
		remove_stats.pidflushes++;
#endif
		pmap->pm_tlbpid = id;
		goto done;
	}
	while (i & 1) {
		i >>= 1;
		id++;
	}
	whichpids[id >> 5] |= 1 << (id & 0x1F);
	pmap->pm_tlbpid = id;
done:
	/*
	 * Map in the new TLB cache.
	 */
	if (pmap == cur_pmap)
		return (id);
	cur_pmap = pmap;
	for (i = 0; i < PMAP_HASH_UPAGES; i++) {
		MachTLBWriteIndexed(i + UPAGES,
			(PMAP_HASH_UADDR + (i << PGSHIFT)) |
			(id << VMMACH_TLB_PID_SHIFT),
			pmap->pm_hash_ptes[i]);
	}
	return (id);
}
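
/*
 * The while (i & 1) loop above locates the first clear bit in the
 * chosen whichpids word; e.g. for i == 0x17 (binary 10111) it shifts
 * three times and stops, yielding id 3 (plus the word's base of 0 or
 * 32) as the first free PID.
 */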

/*
 * Remove a physical to virtual address translation.
 */
void
pmap_remove_pv(pmap, va, pa)
	pmap_t pmap;
	vm_offset_t va, pa;
{
	register pv_entry_t pv, npv;
	int s;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_remove_pv(%x, %x, %x)\n", pmap, va, pa);
#endif
	/*
	 * Remove page from the PV table (raise IPL since we
	 * may be called at interrupt time).
	 */
	if (!IS_VM_PHYSADDR(pa))
		return;
	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * If it is the first entry on the list, it is actually
	 * in the header and we must copy the following entry up
	 * to the header.  Otherwise we must search the list for
	 * the entry.  In either case we free the now unused entry.
	 */
	if (pmap == pv->pv_pmap && va == pv->pv_va) {
		npv = pv->pv_next;
		if (npv) {
			*pv = *npv;
			free((caddr_t)npv, M_VMPVENT);
		} else
			pv->pv_pmap = NULL;
#ifdef DEBUG
		remove_stats.pvfirst++;
#endif
	} else {
		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
#ifdef DEBUG
			remove_stats.pvsearch++;
#endif
			if (pmap == npv->pv_pmap && va == npv->pv_va)
				break;
		}
#ifdef DIAGNOSTIC
		if (npv == NULL) {
			printf("pmap_remove_pv(%x, %x, %x) not found\n",
			       pmap, va, pa);
			panic("pmap_remove_pv");
		}
#endif
		pv->pv_next = npv->pv_next;
		free((caddr_t)npv, M_VMPVENT);
	}
	splx(s);
}

#ifdef DEBUG
pmap_print(pmap)
	pmap_t pmap;
{
	register pmap_hash_t hp;
	register int i;

	printf("\tpmap_print(%x)\n", pmap);

	if (pmap->pm_hash == zero_pmap_hash) {
		printf("pm_hash == zero\n");
		return;
	}
	if (pmap->pm_hash == (pmap_hash_t)0) {
		printf("pm_hash == kernel\n");
		return;
	}
	hp = pmap->pm_hash;
	for (i = 0; i < PMAP_HASH_NUM_ENTRIES; i++, hp++) {
		if (!hp->high)
			continue;
		printf("%d: hi %x low %x\n", i, hp->high, hp->low);
	}
}
#endif