1 /* 2 * Copyright (c) 1992 The Regents of the University of California. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * the Systems Programming Group of the University of Utah Computer 7 * Science Department and Ralph Campbell. 8 * 9 * %sccs.include.redist.c% 10 * 11 * @(#)pmap.c 7.10 (Berkeley) 07/27/92 12 */ 13 14 /* 15 * Manages physical address maps. 16 * 17 * In addition to hardware address maps, this 18 * module is called upon to provide software-use-only 19 * maps which may or may not be stored in the same 20 * form as hardware maps. These pseudo-maps are 21 * used to store intermediate results from copy 22 * operations to and from address spaces. 23 * 24 * Since the information managed by this module is 25 * also stored by the logical address mapping module, 26 * this module may throw away valid virtual-to-physical 27 * mappings at almost any time. However, invalidations 28 * of virtual-to-physical mappings must be done as 29 * requested. 30 * 31 * In order to cope with hardware architectures which 32 * make virtual-to-physical map invalidates expensive, 33 * this module may delay invalidate or reduced protection 34 * operations until such time as they are actually 35 * necessary. This module is given full information as 36 * to which processors are currently using which maps, 37 * and to when physical maps must be made correct. 38 */ 39 40 #include "param.h" 41 #include "proc.h" 42 #include "malloc.h" 43 #include "user.h" 44 45 #include "vm/vm.h" 46 #include "vm/vm_kern.h" 47 #include "vm/vm_page.h" 48 49 #include "../include/machConst.h" 50 #include "../include/pte.h" 51 52 /* 53 * For each vm_page_t, there is a list of all currently valid virtual 54 * mappings of that page. An entry is a pv_entry_t, the list is pv_table. 55 * XXX really should do this as a part of the higher level code. 
*/
/*
 * A pv_entry is one node on a page's list of current virtual mappings.
 * The list head for physical address pa lives at pa_to_pvh(pa); an empty
 * head is marked by pv_pmap == NULL.
 */
typedef struct pv_entry {
    struct pv_entry *pv_next;   /* next pv_entry on this page's list */
    struct pmap *pv_pmap;       /* pmap where mapping lies */
    vm_offset_t pv_va;          /* virtual address for mapping */
} *pv_entry_t;

pv_entry_t pv_table;            /* array of entries, one per page */
extern void pmap_remove_pv();

/* Translate a managed physical address to its pv_table slot. */
#define pa_index(pa)    atop((pa) - first_phys_addr)
#define pa_to_pvh(pa)   (&pv_table[pa_index(pa)])

#ifdef DEBUG
/* Counters maintained by pmap_enter(). */
struct {
    int kernel;     /* entering kernel mapping */
    int user;       /* entering user mapping */
    int ptpneeded;  /* needed to allocate a PT page */
    int pwchange;   /* no mapping change, just wiring or protection */
    int wchange;    /* no mapping change, just wiring */
    int mchange;    /* was mapped but mapping to different page */
    int managed;    /* a managed page */
    int firstpv;    /* first mapping for this PA */
    int secondpv;   /* second mapping for this PA */
    int ci;         /* cache inhibited */
    int unmanaged;  /* not a managed page */
    int flushes;    /* cache flushes */
    int cachehit;   /* new entry forced valid entry out */
} enter_stats;
/* Counters maintained by pmap_remove(). */
struct {
    int calls;
    int removes;
    int flushes;
    int pidflushes; /* HW pid stolen */
    int pvfirst;
    int pvsearch;
} remove_stats;

/* Trace mask; each PDB_* bit enables logging for one class of operation. */
int pmapdebug;
#define PDB_FOLLOW      0x0001
#define PDB_INIT        0x0002
#define PDB_ENTER       0x0004
#define PDB_REMOVE      0x0008
#define PDB_CREATE      0x0010
#define PDB_PTPAGE      0x0020
#define PDB_CACHE       0x0040
#define PDB_BITS        0x0080
#define PDB_COLLECT     0x0100
#define PDB_PROTECT     0x0200
#define PDB_TLBPID      0x0400
#define PDB_PARANOIA    0x2000
#define PDB_WIRING      0x4000
#define PDB_PVDUMP      0x8000

#endif /* DEBUG */

u_int whichpids[2] = {  /* bit mask of hardware PID's in use */
    3, 0                /* bits 0 and 1 start set: PIDs 0,1 preallocated */
};

struct pmap kernel_pmap_store;  /* the kernel pmap; its pm_hash is NULL */
pmap_t cur_pmap;                /* current pmap mapped in hardware */

vm_offset_t avail_start;        /* PA of first available physical page */
vm_offset_t avail_end;          /* PA of
last available physical page */
vm_size_t mem_size;             /* memory size in bytes */
vm_offset_t virtual_avail;      /* VA of first avail page (after kernel bss)*/
vm_offset_t virtual_end;        /* VA of last avail page (end of kernel AS) */
int pmaxpagesperpage;           /* PAGE_SIZE / NBPG */
#ifdef ATTR
char *pmap_attributes;          /* reference and modify bits */
#endif
pmap_hash_t zero_pmap_hash;     /* empty TLB hash table for init */

/*
 * Bootstrap the system enough to run with virtual memory.
 *
 * Steals physical memory starting at firstaddr for the kernel TLB hash
 * table, the shared empty user hash table, and pv_table, then sets the
 * avail_*/virtual_* bounds used by the VM system.
 */
void
pmap_bootstrap(firstaddr)
    vm_offset_t firstaddr;
{
    register int i;
    vm_offset_t start = firstaddr;  /* remember base so we can zero it all */
    extern int maxmem, physmem;

    /*
     * Allocate a TLB hash table for the kernel.
     * This could be a KSEG0 address and thus save TLB entries but
     * its faster and simpler in assembly language to have a
     * fixed address that can be accessed with a 16 bit signed offset.
     * Note: the kernel pm_hash field is null, user pm_hash fields are
     * either the table or zero_pmap_hash.
     */
    kernel_pmap_store.pm_hash = (pmap_hash_t)0;
    for (i = 0; i < PMAP_HASH_KPAGES; i++) {
        /* wire the hash pages into fixed TLB slots, global+modifiable */
        MachTLBWriteIndexed(i + UPAGES + PMAP_HASH_UPAGES,
            PMAP_HASH_KADDR + (i << PGSHIFT),
            firstaddr | PG_V | PG_M | PG_G);
        firstaddr += NBPG;
    }

    /*
     * Allocate an empty TLB hash table for initial pmap's.
     * Mapped read-only so any write by a new pmap faults and a real
     * table gets allocated (see pmap_enter).
     */
    zero_pmap_hash = (pmap_hash_t)MACH_PHYS_TO_CACHED(firstaddr);

    /* init proc[0]'s pmap hash table */
    for (i = 0; i < PMAP_HASH_UPAGES; i++) {
        kernel_pmap_store.pm_hash_ptes[i] = firstaddr | PG_V | PG_RO;
        MachTLBWriteIndexed(i + UPAGES,
            (PMAP_HASH_UADDR + (i << PGSHIFT)) |
            (1 << VMMACH_TLB_PID_SHIFT),
            kernel_pmap_store.pm_hash_ptes[i]);
        firstaddr += NBPG;
    }

    /*
     * Allocate memory for pv_table.
     * This will allocate more entries than we really need.
     * We should do this in pmap_init when we know the actual
     * phys_start and phys_end but its better to use phys addresses
     * rather than kernel virtual addresses mapped through the TLB.
     */
    i = (maxmem - pmax_btop(firstaddr)) * sizeof(struct pv_entry);
    i = pmax_round_page(i);
    pv_table = (pv_entry_t)MACH_PHYS_TO_CACHED(firstaddr);
    firstaddr += i;

    /*
     * Clear allocated memory.
     */
    bzero((caddr_t)MACH_PHYS_TO_CACHED(start), firstaddr - start);

    avail_start = firstaddr;
    avail_end = pmax_ptob(maxmem);
    mem_size = avail_end - avail_start;

    virtual_avail = VM_MIN_KERNEL_ADDRESS;
    virtual_end = VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES * NPTEPG * NBPG;
    /* XXX need to decide how to set cnt.v_page_size */
    pmaxpagesperpage = 1;

    cur_pmap = &kernel_pmap_store;
    simple_lock_init(&kernel_pmap_store.pm_lock);
    kernel_pmap_store.pm_count = 1;
}

/*
 * Bootstrap memory allocator. This function allows for early dynamic
 * memory allocation until the virtual memory system has been bootstrapped.
 * After that point, either kmem_alloc or malloc should be used. This
 * function works by stealing pages from the (to be) managed page pool,
 * stealing virtual address space, then mapping the pages and zeroing them.
 *
 * It should be used from pmap_bootstrap till vm_page_startup, afterwards
 * it cannot be used, and will generate a panic if tried. Note that this
 * memory will never be freed, and in essence it is wired down.
*/
void *
pmap_bootstrap_alloc(size)
    int size;
{
    vm_offset_t val;
    extern boolean_t vm_page_startup_initialized;

    if (vm_page_startup_initialized)
        panic("pmap_bootstrap_alloc: called after startup initialized");

    /* Steal whole pages from the front of managed memory, via KSEG0. */
    val = MACH_PHYS_TO_CACHED(avail_start);
    size = round_page(size);
    avail_start += size;

    /* Callers expect zeroed memory. */
    blkclr((caddr_t)val, size);
    return ((void *)val);
}

/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 * Nothing to do here: pv_table was already set up in pmap_bootstrap().
 */
void
pmap_init(phys_start, phys_end)
    vm_offset_t phys_start, phys_end;
{

#ifdef DEBUG
    if (pmapdebug & PDB_FOLLOW)
        printf("pmap_init(%x, %x)\n", phys_start, phys_end);
#endif
}

/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * This routine should only be called by vm_page_startup()
 * with KSEG0 addresses.  No page tables are touched here;
 * only the rounded end address is returned.
 */
vm_offset_t
pmap_map(virt, start, end, prot)
    vm_offset_t virt;
    vm_offset_t start;
    vm_offset_t end;
    int prot;
{

#ifdef DEBUG
    if (pmapdebug & PDB_FOLLOW)
        printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
#endif

    return (round_page(end));
}

/*
 * Create and return a physical map.
 *
 * If the size specified for the map
 * is zero, the map is an actual physical
 * map, and may be referenced by the
 * hardware.
 *
 * If the size specified is non-zero,
 * the map will be used in software only, and
 * is bounded by that size.
*/
pmap_t
pmap_create(size)
    vm_size_t size;
{
    register pmap_t pmap;

#ifdef DEBUG
    if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
        printf("pmap_create(%x)\n", size);
#endif
    /*
     * Software use map does not need a pmap
     */
    if (size)
        return (NULL);

    printf("pmap_create(%x) XXX\n", size); /* XXX */
    /* XXX: is it ok to wait here? */
    pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
#ifdef notifwewait
    if (pmap == NULL)
        panic("pmap_create: cannot allocate a pmap");
#endif
    bzero(pmap, sizeof(*pmap));
    pmap_pinit(pmap);
    return (pmap);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 *
 * New pmaps start out sharing the read-only zero_pmap_hash; a private
 * hash table is allocated lazily by pmap_enter().  No hardware TLB PID
 * is assigned yet (pm_tlbpid == -1) except for proc0's vmspace.
 */
void
pmap_pinit(pmap)
    register struct pmap *pmap;
{
    register int i;
    extern struct vmspace vmspace0;

#ifdef DEBUG
    if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
        printf("pmap_pinit(%x)\n", pmap);
#endif
    simple_lock_init(&pmap->pm_lock);
    pmap->pm_count = 1;
    pmap->pm_flags = 0;
    pmap->pm_hash = zero_pmap_hash;
    /* Cache PTEs for the (read-only) shared empty hash table. */
    for (i = 0; i < PMAP_HASH_UPAGES; i++)
        pmap->pm_hash_ptes[i] =
            (MACH_CACHED_TO_PHYS(zero_pmap_hash) + (i << PGSHIFT)) |
            PG_V | PG_RO;
    if (pmap == &vmspace0.vm_pmap)
        pmap->pm_tlbpid = 1;    /* preallocated in mach_init() */
    else
        pmap->pm_tlbpid = -1;   /* none allocated yet */
}

/*
 * Retire the given physical map from service.
 * Should only be called if the map contains
 * no valid mappings.
*/
void
pmap_destroy(pmap)
    register pmap_t pmap;
{
    int count;

#ifdef DEBUG
    if (pmapdebug & PDB_FOLLOW)
        printf("pmap_destroy(%x)\n", pmap);
#endif
    if (pmap == NULL)
        return;

    printf("pmap_destroy(%x) XXX\n", pmap); /* XXX */
    simple_lock(&pmap->pm_lock);
    count = --pmap->pm_count;
    simple_unlock(&pmap->pm_lock);
    /* Free resources only when the last reference goes away. */
    if (count == 0) {
        pmap_release(pmap);
        free((caddr_t)pmap, M_VMPMAP);
    }
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 *
 * Frees a private TLB hash table (if one was allocated), then gives
 * back the hardware TLB PID: flush its entries, clear its bit in
 * whichpids[], and mark the pmap as having no PID.
 */
void
pmap_release(pmap)
    register pmap_t pmap;
{
    register int id;
#ifdef DIAGNOSTIC
    register int i;
#endif

#ifdef DEBUG
    if (pmapdebug & PDB_FOLLOW)
        printf("pmap_release(%x)\n", pmap);
#endif

    if (pmap->pm_hash && pmap->pm_hash != zero_pmap_hash) {
        kmem_free(kernel_map, (vm_offset_t)pmap->pm_hash,
            PMAP_HASH_SIZE);
        pmap->pm_hash = zero_pmap_hash;
    }
    if ((id = pmap->pm_tlbpid) < 0)
        return;             /* never had a hardware PID */
#ifdef DIAGNOSTIC
    if (!(whichpids[id >> 5] & (1 << (id & 0x1F))))
        panic("pmap_release: id free");
#endif
    MachTLBFlushPID(id);
    whichpids[id >> 5] &= ~(1 << (id & 0x1F));
    pmap->pm_flags &= ~PM_MODIFIED;
    pmap->pm_tlbpid = -1;
    if (pmap == cur_pmap)
        cur_pmap = (pmap_t)0;
#ifdef DIAGNOSTIC
    /* invalidate user PTE cache */
    for (i = 0; i < PMAP_HASH_UPAGES; i++)
        MachTLBWriteIndexed(i + UPAGES, MACH_RESERVED_ADDR, 0);
#endif
}

/*
 * Add a reference to the specified pmap.
*/
void
pmap_reference(pmap)
    pmap_t pmap;
{

#ifdef DEBUG
    if (pmapdebug & PDB_FOLLOW)
        printf("pmap_reference(%x)\n", pmap);
#endif
    if (pmap != NULL) {
        simple_lock(&pmap->pm_lock);
        pmap->pm_count++;
        simple_unlock(&pmap->pm_lock);
    }
}

/*
 * Remove the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly
 * rounded to the page size.
 *
 * Three cases:
 *   - kernel pmap (pm_hash == NULL): clear PTEs and flush each TLB entry;
 *   - non-current user pmap: clear software hash entries only and mark
 *     the pmap PM_MODIFIED (hardware flush deferred);
 *   - current user pmap: clear hash entries and flush the TLB as we go.
 */
void
pmap_remove(pmap, sva, eva)
    register pmap_t pmap;
    vm_offset_t sva, eva;
{
    register vm_offset_t va;
    register pv_entry_t pv, npv;
    register int i;
    pmap_hash_t hp;
    unsigned entry;

#ifdef DEBUG
    if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
        printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
    remove_stats.calls++;
#endif
    if (pmap == NULL)
        return;

    /* anything in the cache? */
    if (pmap->pm_tlbpid < 0 || pmap->pm_hash == zero_pmap_hash)
        return;

    if (!pmap->pm_hash) {
        register pt_entry_t *pte;

        /* remove entries from kernel pmap */
#ifdef DIAGNOSTIC
        if (sva < VM_MIN_KERNEL_ADDRESS ||
            eva > VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
            panic("pmap_remove");
#endif
        pte = kvtopte(sva);
        for (va = sva; va < eva; va += NBPG, pte++) {
            entry = pte->pt_entry;
            if (!(entry & PG_V))
                continue;
            if (entry & PG_WIRED)
                pmap->pm_stats.wired_count--;
            pmap->pm_stats.resident_count--;
            pmap_remove_pv(pmap, va, entry & PG_FRAME);
#ifdef ATTR
            pmap_attributes[atop(entry - KERNBASE)] = 0;
#endif
            pte->pt_entry = PG_NV;
            /*
             * Flush the TLB for the given address.
             */
            MachTLBFlushAddr(va);
#ifdef DEBUG
            remove_stats.flushes++;
#endif
        }
        return;
    }

    /* Tag the VAs with the pmap's hardware PID, as stored in the hash. */
    va = sva | (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
    eva |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
    /*
     * If we are not in the current address space, just flush the
     * software cache and not the hardware.
     */
    if (pmap != cur_pmap) {
        for (; va < eva; va += NBPG) {
            hp = &pmap->pm_hash[PMAP_HASH(va)];
            if (hp->pmh_pte[0].high == va)
                i = 0;
            else if (hp->pmh_pte[1].high == va)
                i = 1;
            else
                continue;

            hp->pmh_pte[i].high = 0;
            entry = hp->pmh_pte[i].low;
            if (entry & PG_WIRED)
                pmap->pm_stats.wired_count--;
            pmap->pm_stats.resident_count--;
            pmap_remove_pv(pmap, va & PG_FRAME, entry & PG_FRAME);
#ifdef ATTR
            pmap_attributes[atop(entry - KERNBASE)] = 0;
#endif
            /* remember that the hardware TLB is now stale */
            pmap->pm_flags |= PM_MODIFIED;
#ifdef DEBUG
            remove_stats.removes++;
#endif
        }
        return;
    }

    for (; va < eva; va += NBPG) {
        hp = &pmap->pm_hash[PMAP_HASH(va)];
        if (hp->pmh_pte[0].high == va)
            i = 0;
        else if (hp->pmh_pte[1].high == va)
            i = 1;
        else
            continue;

        hp->pmh_pte[i].high = 0;
        entry = hp->pmh_pte[i].low;
        if (entry & PG_WIRED)
            pmap->pm_stats.wired_count--;
        pmap->pm_stats.resident_count--;
        pmap_remove_pv(pmap, va & PG_FRAME, entry & PG_FRAME);
#ifdef ATTR
        pmap_attributes[atop(entry - KERNBASE)] = 0;
#endif
        /*
         * Flush the TLB for the given address.
         */
        MachTLBFlushAddr(va);
#ifdef DEBUG
        remove_stats.flushes++;
#endif
    }
}

/*
 * pmap_page_protect:
 *
 *      Lower the permission for all mappings to a given page.
*/
void
pmap_page_protect(pa, prot)
    vm_offset_t pa;
    vm_prot_t prot;
{
    register pv_entry_t pv;
    register vm_offset_t va;
    int s;

#ifdef DEBUG
    if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
        prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))
        printf("pmap_page_protect(%x, %x)\n", pa, prot);
#endif
    if (!IS_VM_PHYSADDR(pa))
        return;                 /* unmanaged page: no pv list to walk */

    switch (prot) {
    case VM_PROT_ALL:
        break;                  /* nothing to lower */

    /* copy_on_write */
    case VM_PROT_READ:
    case VM_PROT_READ|VM_PROT_EXECUTE:
        pv = pa_to_pvh(pa);
        s = splimp();
        /*
         * Loop over all current mappings setting/clearing as appropos.
         */
        if (pv->pv_pmap != NULL) {
            for (; pv; pv = pv->pv_next) {
                extern vm_offset_t pager_sva, pager_eva;
                va = pv->pv_va;

                /*
                 * XXX don't write protect pager mappings
                 */
                if (va >= pager_sva && va < pager_eva)
                    continue;
                pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
                    prot);
            }
        }
        splx(s);
        break;

    /* remove_all */
    default:
        pv = pa_to_pvh(pa);
        s = splimp();
        /* pmap_remove unlinks the pv entry, advancing the list head */
        while (pv->pv_pmap != NULL) {
            pmap_remove(pv->pv_pmap, pv->pv_va,
                pv->pv_va + PAGE_SIZE);
        }
        splx(s);
    }
}

/*
 * Set the physical protection on the
 * specified range of this map as requested.
 */
void
pmap_protect(pmap, sva, eva, prot)
    register pmap_t pmap;
    vm_offset_t sva, eva;
    vm_prot_t prot;
{
    register vm_offset_t va;
    register int i;
    pmap_hash_t hp;
    u_int p;

#ifdef DEBUG
    if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
        printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
#endif
    if (pmap == NULL)
        return;

    /* anything in the software cache? */
    if (pmap->pm_tlbpid < 0 || pmap->pm_hash == zero_pmap_hash)
        return;

    /* removing read permission means removing the mapping entirely */
    if (!(prot & VM_PROT_READ)) {
        pmap_remove(pmap, sva, eva);
        return;
    }

    if (!pmap->pm_hash) {
        register pt_entry_t *pte;

        /*
         * Change entries in kernel pmap.
         * This will trap if the page is writeable (in order to set
         * the dirty bit) even if the dirty bit is already set. The
         * optimization isn't worth the effort since this code isn't
         * executed much. The common case is to make a user page
         * read-only.
         */
#ifdef DIAGNOSTIC
        if (sva < VM_MIN_KERNEL_ADDRESS ||
            eva > VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
            panic("pmap_protect");
#endif
        p = (prot & VM_PROT_WRITE) ? PG_RW : PG_RO;
        pte = kvtopte(sva);
        for (va = sva; va < eva; va += NBPG, pte++) {
            if (!(pte->pt_entry & PG_V))
                continue;
            pte->pt_entry = (pte->pt_entry & ~(PG_M | PG_RO)) | p;
            /*
             * Update the TLB if the given address is in the cache.
             */
            MachTLBUpdate(va, pte->pt_entry);
        }
        return;
    }

    p = (prot & VM_PROT_WRITE) ? PG_RW : PG_RO;
    /* Tag the VAs with the pmap's hardware PID, as stored in the hash. */
    va = sva | (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
    eva |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
    /*
     * If we are not in the current address space, just flush the
     * software cache and not the hardware.
     */
    if (pmap != cur_pmap) {
        for (; va < eva; va += NBPG) {
            hp = &pmap->pm_hash[PMAP_HASH(va)];
            if (hp->pmh_pte[0].high == va)
                i = 0;
            else if (hp->pmh_pte[1].high == va)
                i = 1;
            else
                continue;

            hp->pmh_pte[i].low = (hp->pmh_pte[i].low & ~(PG_M | PG_RO)) | p;
            /* hardware TLB left stale; flushed when pmap is next loaded */
            pmap->pm_flags |= PM_MODIFIED;
        }
        return;
    }

    for (; va < eva; va += NBPG) {
        hp = &pmap->pm_hash[PMAP_HASH(va)];
        if (hp->pmh_pte[0].high == va)
            i = 0;
        else if (hp->pmh_pte[1].high == va)
            i = 1;
        else
            continue;

        hp->pmh_pte[i].low = (hp->pmh_pte[i].low & ~(PG_M | PG_RO)) | p;
        /*
         * Update the TLB if the given address is in the cache.
         */
        MachTLBUpdate(hp->pmh_pte[i].high, hp->pmh_pte[i].low);
    }
}

/*
 * Insert the given physical page (p) at
 * the specified virtual address (v) in the
 * target physical map with the protection requested.
 *
 * If specified, the page will be wired down, meaning
 * that the related pte can not be reclaimed.
 *
 * NB: This is the only routine which MAY NOT lazy-evaluate
 * or lose information. That is, this routine must actually
 * insert this page into the given map NOW.
*/
void
pmap_enter(pmap, va, pa, prot, wired)
    register pmap_t pmap;
    vm_offset_t va;
    register vm_offset_t pa;
    vm_prot_t prot;
    boolean_t wired;
{
    register pmap_hash_t hp;
    register u_int npte;        /* new PTE protection/modified bits */
    register int i, j;
    int newpos;

#ifdef DEBUG
    if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
        printf("pmap_enter(%x, %x, %x, %x, %x)\n",
            pmap, va, pa, prot, wired);
#endif
#ifdef DIAGNOSTIC
    if (!pmap)
        panic("pmap_enter: pmap");
    if (pmap->pm_tlbpid < 0)
        panic("pmap_enter: tlbpid");
    if (!pmap->pm_hash) {
        enter_stats.kernel++;
        if (va < VM_MIN_KERNEL_ADDRESS ||
            va >= VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
            panic("pmap_enter: kva");
    } else {
        enter_stats.user++;
        if (va & 0x80000000)
            panic("pmap_enter: uva");
    }
    if (pa & 0x80000000)
        panic("pmap_enter: pa");
    if (!(prot & VM_PROT_READ))
        panic("pmap_enter: prot");
#endif

    /*
     * See if we need to create a new TLB cache.
     * (First write into a pmap still sharing the read-only
     * zero_pmap_hash: allocate a private hash table now.)
     */
    if (pmap->pm_hash == zero_pmap_hash) {
        register vm_offset_t kva;
        register pt_entry_t *pte;

        kva = kmem_alloc(kernel_map, PMAP_HASH_SIZE);
        pmap->pm_hash = (pmap_hash_t)kva;

        /*
         * Convert the kernel virtual address to a physical one
         * and cache it in the pmap. Note: if the phyical address
         * can change (due to memory compaction in kmem_alloc?),
         * we will have to update things.
         */
        pte = kvtopte(kva);
        for (i = 0; i < PMAP_HASH_UPAGES; i++) {
            /* clear PG_G: these pages are per-address-space */
            pmap->pm_hash_ptes[i] = pte->pt_entry & ~PG_G;
            pte++;
        }

        /*
         * Map in new TLB cache if it is current.
         */
        if (pmap == cur_pmap) {
            for (i = 0; i < PMAP_HASH_UPAGES; i++) {
                MachTLBWriteIndexed(i + UPAGES,
                    (PMAP_HASH_UADDR + (i << PGSHIFT)) |
                    (pmap->pm_tlbpid <<
                    VMMACH_TLB_PID_SHIFT),
                    pmap->pm_hash_ptes[i]);
            }
        }
#ifdef DIAGNOSTIC
        for (i = 0; i < PAGE_SIZE; i += sizeof(int), kva += sizeof(int))
            if (*(int *)kva != 0)
                panic("pmap_enter: *kva != 0");
#endif
    }

    if (IS_VM_PHYSADDR(pa)) {
        register pv_entry_t pv, npv;
        int s;

        /* Choose the initial protection/modify bits for the PTE. */
        if (!(prot & VM_PROT_WRITE))
            npte = PG_RO;
        else {
            register vm_page_t mem;

            mem = PHYS_TO_VM_PAGE(pa);
            if ((int)va < 0) {
                /*
                 * Don't bother to trap on kernel writes,
                 * just record page as dirty.
                 */
                npte = PG_M;
                mem->clean = FALSE;
            } else
#ifdef ATTR
            if ((pmap_attributes[atop(pa - KERNBASE)] &
                PMAP_ATTR_MOD) || !mem->clean)
#else
            if (!mem->clean)
#endif
                npte = PG_M;
            else
                npte = 0;   /* trap first write to set dirty bit */
        }

#ifdef DEBUG
        enter_stats.managed++;
#endif
        /*
         * Enter the pmap and virtual address into the
         * physical to virtual map table.
         */
        pv = pa_to_pvh(pa);
        s = splimp();
#ifdef DEBUG
        if (pmapdebug & PDB_ENTER)
            printf("pmap_enter: pv %x: was %x/%x/%x\n",
                pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
#endif
        if (pv->pv_pmap == NULL) {
            /*
             * No entries yet, use header as the first entry
             */
#ifdef DEBUG
            enter_stats.firstpv++;
#endif
            pv->pv_va = va;
            pv->pv_pmap = pmap;
            pv->pv_next = NULL;
        } else {
            /*
             * There is at least one other VA mapping this page.
             * Place this entry after the header.
             *
             * Note: the entry may already be in the table if
             * we are only changing the protection bits.
             */
            for (npv = pv; npv; npv = npv->pv_next)
                if (pmap == npv->pv_pmap && va == npv->pv_va) {
#ifdef DIAGNOSTIC
                    if (!pmap->pm_hash) {
                        unsigned entry;

                        entry = kvtopte(va)->pt_entry;
                        if (!(entry & PG_V) ||
                            (entry & PG_FRAME) != pa)
                            printf("found kva %x pa %x in pv_table but != %x\n",
                                va, pa, entry);
                    } else {
                        hp = &pmap->pm_hash[PMAP_HASH(va)];
                        if ((hp->pmh_pte[0].high == (va |
                            (pmap->pm_tlbpid <<
                            VMMACH_TLB_PID_SHIFT)) &&
                            (hp->pmh_pte[0].low & PG_FRAME) == pa) ||
                            (hp->pmh_pte[1].high == (va |
                            (pmap->pm_tlbpid <<
                            VMMACH_TLB_PID_SHIFT)) &&
                            (hp->pmh_pte[1].low & PG_FRAME) == pa))
                            goto fnd;
                        printf("found va %x pa %x in pv_table but !=\n",
                            va, pa);
                    }
#endif
                    goto fnd;
                }
            /* can this cause us to recurse forever? */
            npv = (pv_entry_t)
                malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
            npv->pv_va = va;
            npv->pv_pmap = pmap;
            npv->pv_next = pv->pv_next;
            pv->pv_next = npv;
#ifdef DEBUG
            if (!npv->pv_next)
                enter_stats.secondpv++;
#endif
        fnd:
            ;
        }
        splx(s);
    } else {
        /*
         * Assumption: if it is not part of our managed memory
         * then it must be device memory which may be volitile.
         */
#ifdef DEBUG
        enter_stats.unmanaged++;
#endif
        printf("pmap_enter: UNMANAGED ADDRESS va %x pa %x\n",
            va, pa); /* XXX */
        npte = (prot & VM_PROT_WRITE) ? PG_M : PG_RO;
    }

    /*
     * The only time we need to flush the cache is if we
     * execute from a physical address and then change the data.
     * This is the best place to do this.
     * pmap_protect() and pmap_remove() are mostly used to switch
     * between R/W and R/O pages.
     * NOTE: we only support cache flush for read only text.
     */
    if (prot == (VM_PROT_READ | VM_PROT_EXECUTE))
        MachFlushICache(MACH_PHYS_TO_CACHED(pa), PAGE_SIZE);

    if (!pmap->pm_hash) {
        register pt_entry_t *pte;

        /* enter entries into kernel pmap */
        pte = kvtopte(va);
        npte |= pa | PG_V | PG_G;
        if (wired) {
            pmap->pm_stats.wired_count += pmaxpagesperpage;
            npte |= PG_WIRED;
        }
        i = pmaxpagesperpage;
        do {
            if (!(pte->pt_entry & PG_V)) {
                pmap->pm_stats.resident_count++;
                MachTLBWriteRandom(va, npte);
            } else {
#ifdef DIAGNOSTIC
                if (pte->pt_entry & PG_WIRED)
                    panic("pmap_enter: kernel wired");
#endif
                /*
                 * Update the same virtual address entry.
                 */
                MachTLBUpdate(va, npte);
                printf("TLB update kva %x pte %x -> %x\n",
                    va, pte->pt_entry, npte); /* XXX */
            }
            pte->pt_entry = npte;
            va += NBPG;
            npte += NBPG;
            pte++;
        } while (--i != 0);
        return;
    }

    /*
     * Now validate mapping with desired protection/wiring.
     * Assume uniform modified and referenced status for all
     * PMAX pages in a MACH page.
     */
    npte |= pa | PG_V;
    if (wired) {
        pmap->pm_stats.wired_count += pmaxpagesperpage;
        npte |= PG_WIRED;
    }
#ifdef DEBUG
    if (pmapdebug & PDB_ENTER)
        printf("pmap_enter: new pte value %x\n", npte);
#endif
    va |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
    i = pmaxpagesperpage;
    do {
        hp = &pmap->pm_hash[PMAP_HASH(va)];
        /* Same VA mapped to the same frame in either bucket slot? */
        if (hp->pmh_pte[0].high == va &&
            (hp->pmh_pte[0].low & PG_FRAME) == (npte & PG_FRAME))
            j = 0;
        else if (hp->pmh_pte[1].high == va &&
            (hp->pmh_pte[1].low & PG_FRAME) == (npte & PG_FRAME))
            j = 1;
        else
            j = -1;
        if (j >= 0) {
#ifdef DEBUG
            enter_stats.cachehit++;
#endif
            if (!(hp->pmh_pte[j].low & PG_WIRED)) {
                /*
                 * Update the same entry.
                 */
                hp->pmh_pte[j].low = npte;
                MachTLBUpdate(va, npte);
            } else {
                /*
                 * Don't replace wired entries, just update
                 * the hardware TLB.
                 * Bug: routines to flush the TLB won't know
                 * that the entry is in the hardware.
                 */
                printf("pmap_enter: wired va %x %x\n", va,
                    hp->pmh_pte[j].low); /* XXX */
                panic("pmap_enter: wired"); /* XXX */
                MachTLBWriteRandom(va, npte);
            }
            goto next;
        }
        /* Take a free slot if the bucket has one. */
        if (!hp->pmh_pte[0].high)
            j = 0;
        else if (!hp->pmh_pte[1].high)
            j = 1;
        else
            j = -1;
        if (j >= 0) {
            pmap->pm_stats.resident_count++;
            hp->pmh_pte[j].high = va;
            hp->pmh_pte[j].low = npte;
            MachTLBWriteRandom(va, npte);
        } else {
            /* Bucket full: evict slot 1 (unless wired) and insert at 0. */
#ifdef DEBUG
            enter_stats.cachehit++;
#endif
            if (!(hp->pmh_pte[1].low & PG_WIRED)) {
                MachTLBFlushAddr(hp->pmh_pte[1].high);
                pmap_remove_pv(pmap,
                    hp->pmh_pte[1].high & PG_FRAME,
                    hp->pmh_pte[1].low & PG_FRAME);
                hp->pmh_pte[1] = hp->pmh_pte[0];
                hp->pmh_pte[0].high = va;
                hp->pmh_pte[0].low = npte;
                MachTLBWriteRandom(va, npte);
            } else {
                /*
                 * Don't replace wired entries, just update
                 * the hardware TLB.
                 * Bug: routines to flush the TLB won't know
                 * that the entry is in the hardware.
                 */
                printf("pmap_enter: wired va %x %x\n", va,
                    hp->pmh_pte[1].low); /* XXX */
                panic("pmap_enter: wired"); /* XXX */
                MachTLBWriteRandom(va, npte);
            }
        }
    next:
        va += NBPG;
        npte += NBPG;
    } while (--i != 0);
}

/*
 * Routine:     pmap_change_wiring
 * Function:    Change the wiring attribute for a map/virtual-address
 *              pair.
 * In/out conditions:
 *              The mapping must already exist in the pmap.
*/
void
pmap_change_wiring(pmap, va, wired)
    register pmap_t pmap;
    vm_offset_t va;
    boolean_t wired;
{
    register pmap_hash_t hp;
    u_int p;                    /* PG_WIRED or 0 */
    register int i, j;

#ifdef DEBUG
    if (pmapdebug & PDB_FOLLOW)
        printf("pmap_change_wiring(%x, %x, %x)\n", pmap, va, wired);
#endif
    if (pmap == NULL)
        return;

    p = wired ? PG_WIRED : 0;

    /*
     * Don't need to flush the TLB since PG_WIRED is only in software.
     */
    if (!pmap->pm_hash) {
        register pt_entry_t *pte;

        /* change entries in kernel pmap */
#ifdef DIAGNOSTIC
        if (va < VM_MIN_KERNEL_ADDRESS ||
            va >= VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
            panic("pmap_change_wiring");
#endif
        pte = kvtopte(va);
        i = pmaxpagesperpage;
        /* adjust the wired count once, based on the first PTE's state */
        if (!(pte->pt_entry & PG_WIRED) && p)
            pmap->pm_stats.wired_count += i;
        else if ((pte->pt_entry & PG_WIRED) && !p)
            pmap->pm_stats.wired_count -= i;
        do {
            if (!(pte->pt_entry & PG_V))
                continue;
            pte->pt_entry = (pte->pt_entry & ~PG_WIRED) | p;
            pte++;
        } while (--i != 0);
    } else if (pmap->pm_tlbpid >= 0 && pmap->pm_hash != zero_pmap_hash) {
        i = pmaxpagesperpage;
        /* tag the VA with the pmap's hardware PID, as stored in the hash */
        va = (va & PG_FRAME) | (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
        do {
            hp = &pmap->pm_hash[PMAP_HASH(va)];
            if (hp->pmh_pte[0].high == va)
                j = 0;
            else if (hp->pmh_pte[1].high == va)
                j = 1;
            else {
                va += NBPG;
                continue;
            }
            if (!(hp->pmh_pte[j].low & PG_WIRED) && p)
                pmap->pm_stats.wired_count++;
            else if ((hp->pmh_pte[j].low & PG_WIRED) && !p)
                pmap->pm_stats.wired_count--;
            hp->pmh_pte[j].low = (hp->pmh_pte[j].low & ~PG_WIRED) | p;
            va += NBPG;
        } while (--i != 0);
    }
}

/*
 * Routine:     pmap_extract
 * Function:
 *      Extract the physical page address associated
 *      with the given map/virtual_address pair.
*/
vm_offset_t
pmap_extract(pmap, va)
    register pmap_t pmap;
    vm_offset_t va;
{
    register vm_offset_t pa;
    register pmap_hash_t hp;
    register int i;

#ifdef DEBUG
    if (pmapdebug & PDB_FOLLOW)
        printf("pmap_extract(%x, %x) -> ", pmap, va);
#endif

    if (!pmap->pm_hash) {
        /* kernel pmap: direct PTE lookup */
#ifdef DIAGNOSTIC
        if (va < VM_MIN_KERNEL_ADDRESS ||
            va >= VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
            panic("pmap_extract");
#endif
        pa = kvtopte(va)->pt_entry & PG_FRAME;
    } else if (pmap->pm_tlbpid >= 0) {
        /* user pmap: probe both slots of the hash bucket */
        hp = &pmap->pm_hash[PMAP_HASH(va)];
        va = (va & PG_FRAME) | (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
        if (hp->pmh_pte[0].high == va)
            pa = hp->pmh_pte[0].low & PG_FRAME;
        else if (hp->pmh_pte[1].high == va)
            pa = hp->pmh_pte[1].low & PG_FRAME;
        else
            pa = 0;             /* no mapping found */
    } else
        pa = 0;                 /* pmap has no hardware PID yet */

#ifdef DEBUG
    if (pmapdebug & PDB_FOLLOW)
        printf("%x\n", pa);
#endif
    return (pa);
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 * (Unimplemented on this machine.)
 */
void
pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
    pmap_t dst_pmap;
    pmap_t src_pmap;
    vm_offset_t dst_addr;
    vm_size_t len;
    vm_offset_t src_addr;
{

#ifdef DEBUG
    if (pmapdebug & PDB_FOLLOW)
        printf("pmap_copy(%x, %x, %x, %x, %x)\n",
            dst_pmap, src_pmap, dst_addr, len, src_addr);
#endif
}

/*
 * Require that all active physical maps contain no
 * incorrect entries NOW. [This update includes
 * forcing updates of any address map caching.]
 *
 * Generally used to insure that a thread about
 * to run will see a semantically correct world.
1214 */ 1215 void 1216 pmap_update() 1217 { 1218 1219 #ifdef DEBUG 1220 if (pmapdebug & PDB_FOLLOW) 1221 printf("pmap_update()\n"); 1222 #endif 1223 } 1224 1225 /* 1226 * Routine: pmap_collect 1227 * Function: 1228 * Garbage collects the physical map system for 1229 * pages which are no longer used. 1230 * Success need not be guaranteed -- that is, there 1231 * may well be pages which are not referenced, but 1232 * others may be collected. 1233 * Usage: 1234 * Called by the pageout daemon when pages are scarce. 1235 */ 1236 void 1237 pmap_collect(pmap) 1238 pmap_t pmap; 1239 { 1240 1241 #ifdef DEBUG 1242 if (pmapdebug & PDB_FOLLOW) 1243 printf("pmap_collect(%x)\n", pmap); 1244 #endif 1245 } 1246 1247 /* 1248 * pmap_zero_page zeros the specified (machine independent) 1249 * page. 1250 */ 1251 void 1252 pmap_zero_page(phys) 1253 vm_offset_t phys; 1254 { 1255 register int *p, *end; 1256 1257 #ifdef DEBUG 1258 if (pmapdebug & PDB_FOLLOW) 1259 printf("pmap_zero_page(%x)\n", phys); 1260 #endif 1261 p = (int *)MACH_PHYS_TO_CACHED(phys); 1262 end = p + PAGE_SIZE / sizeof(int); 1263 do { 1264 p[0] = 0; 1265 p[1] = 0; 1266 p[2] = 0; 1267 p[3] = 0; 1268 p += 4; 1269 } while (p != end); 1270 } 1271 1272 /* 1273 * pmap_copy_page copies the specified (machine independent) 1274 * page. 
1275 */ 1276 void 1277 pmap_copy_page(src, dst) 1278 vm_offset_t src, dst; 1279 { 1280 register int *s, *d, *end; 1281 register int tmp0, tmp1, tmp2, tmp3; 1282 1283 #ifdef DEBUG 1284 if (pmapdebug & PDB_FOLLOW) 1285 printf("pmap_copy_page(%x, %x)\n", src, dst); 1286 #endif 1287 s = (int *)MACH_PHYS_TO_CACHED(src); 1288 d = (int *)MACH_PHYS_TO_CACHED(dst); 1289 end = s + PAGE_SIZE / sizeof(int); 1290 do { 1291 tmp0 = s[0]; 1292 tmp1 = s[1]; 1293 tmp2 = s[2]; 1294 tmp3 = s[3]; 1295 d[0] = tmp0; 1296 d[1] = tmp1; 1297 d[2] = tmp2; 1298 d[3] = tmp3; 1299 s += 4; 1300 d += 4; 1301 } while (s != end); 1302 } 1303 1304 /* 1305 * Routine: pmap_pageable 1306 * Function: 1307 * Make the specified pages (by pmap, offset) 1308 * pageable (or not) as requested. 1309 * 1310 * A page which is not pageable may not take 1311 * a fault; therefore, its page table entry 1312 * must remain valid for the duration. 1313 * 1314 * This routine is merely advisory; pmap_enter 1315 * will specify that these pages are to be wired 1316 * down (or not) as appropriate. 1317 */ 1318 void 1319 pmap_pageable(pmap, sva, eva, pageable) 1320 pmap_t pmap; 1321 vm_offset_t sva, eva; 1322 boolean_t pageable; 1323 { 1324 1325 #ifdef DEBUG 1326 if (pmapdebug & PDB_FOLLOW) 1327 printf("pmap_pageable(%x, %x, %x, %x)\n", 1328 pmap, sva, eva, pageable); 1329 #endif 1330 } 1331 1332 /* 1333 * Clear the modify bits on the specified physical page. 1334 */ 1335 void 1336 pmap_clear_modify(pa) 1337 vm_offset_t pa; 1338 { 1339 pmap_hash_t hp; 1340 1341 #ifdef DEBUG 1342 if (pmapdebug & PDB_FOLLOW) 1343 printf("pmap_clear_modify(%x)\n", pa); 1344 #endif 1345 #ifdef ATTR 1346 pmap_attributes[atop(pa - KERNBASE)] &= ~PMAP_ATTR_MOD; 1347 #endif 1348 } 1349 1350 /* 1351 * pmap_clear_reference: 1352 * 1353 * Clear the reference bit on the specified physical page. 
 */
void
pmap_clear_reference(pa)
	vm_offset_t pa;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_reference(%x)\n", pa);
#endif
#ifdef ATTR
	/* reference state lives in the software attribute array */
	pmap_attributes[atop(pa - KERNBASE)] &= ~PMAP_ATTR_REF;
#endif
}

/*
 * pmap_is_referenced:
 *
 *	Return whether or not the specified physical page is referenced
 *	by any physical maps.
 *
 *	Without ATTR configured there is no reference tracking at all,
 *	so this always reports FALSE.
 */
boolean_t
pmap_is_referenced(pa)
	vm_offset_t pa;
{
#ifdef ATTR
	/* returns the raw PMAP_ATTR_REF bit, nonzero meaning referenced */
	return (pmap_attributes[atop(pa - KERNBASE)] & PMAP_ATTR_REF);
#else
	return (FALSE);
#endif
}

/*
 * pmap_is_modified:
 *
 *	Return whether or not the specified physical page is modified
 *	by any physical maps.
 *
 *	Without ATTR configured there is no modify tracking at all,
 *	so this always reports FALSE.
 */
boolean_t
pmap_is_modified(pa)
	vm_offset_t pa;
{
#ifdef ATTR
	/* returns the raw PMAP_ATTR_MOD bit, nonzero meaning modified */
	return (pmap_attributes[atop(pa - KERNBASE)] & PMAP_ATTR_MOD);
#else
	return (FALSE);
#endif
}

/*
 * Convert a "physical page number" from a device map into a physical
 * address.  Currently always panics before the conversion is reached.
 */
vm_offset_t
pmap_phys_address(ppn)
	int ppn;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_phys_address(%x)\n", ppn);
#endif
	/* deliberately disabled; the return below is unreachable */
	panic("pmap_phys_address");	/* XXX */
	return (pmax_ptob(ppn));
}

/*
 * Miscellaneous support routines
 */

/*
 * Allocate a hardware PID and return it.
 * Also, change the hardwired TLB entry to point to the current TLB cache.
 * This is called by swtch().
 */
int
pmap_alloc_tlbpid(p)
	register struct proc *p;
{
	register pmap_t pmap;
	register u_int i;
	register int id;

	pmap = &p->p_vmspace->vm_pmap;
	if ((id = pmap->pm_tlbpid) >= 0) {
		/*
		 * Process already holds a hardware PID; just flush its
		 * stale TLB entries if the pmap was modified while away.
		 */
		if (pmap->pm_flags & PM_MODIFIED) {
			pmap->pm_flags &= ~PM_MODIFIED;
			MachTLBFlushPID(id);
		}
		goto done;
	}

	/* look for a free bit in the two 32-bit PID allocation masks */
	if ((i = whichpids[0]) != 0xFFFFFFFF)
		id = 0;
	else if ((i = whichpids[1]) != 0xFFFFFFFF)
		id = 32;
	else {
		register struct proc *q;
		register pmap_t q_pmap;

		/*
		 * Have to find a tlbpid to recycle.
		 * There is probably a better way to do this.
		 *
		 * Prefer the PID of a process that is not currently
		 * runnable; failing that, fall through and steal the last
		 * PID seen.
		 */
		for (q = (struct proc *)allproc; q != NULL; q = q->p_nxt) {
			q_pmap = &q->p_vmspace->vm_pmap;
			if ((id = q_pmap->pm_tlbpid) < 0)
				continue;
			if (q->p_stat != SRUN)
				goto fnd;
		}
		if (id < 0)
			panic("TLBPidAlloc");
		/*
		 * NOTE(review): if every PID-holding process is SRUN, we
		 * reach fnd with q == NULL (loop ran off the end) and the
		 * printf below would dereference it; q_pmap is then also
		 * from the last process scanned.  Verify this case cannot
		 * occur in practice.
		 */
	fnd:
		printf("pmap_alloc_tlbpid: recycle pid %d (%s) tlbpid %d\n",
			q->p_pid, q->p_comm, id);	/* XXX */
		/*
		 * Even though the virtual to physical mapping hasn't changed,
		 * we need to clear the PID tag in the high entry of the cache.
		 */
		if (q_pmap->pm_hash != zero_pmap_hash) {
			register pmap_hash_t hp;
			register int j;

			/* walk every 2-way bucket and drop the victim's entries */
			hp = q_pmap->pm_hash;
			for (i = 0; i < PMAP_HASH_NUM_ENTRIES; i++, hp++) {
				for (j = 0; j < 2; j++) {
					if (!hp->pmh_pte[j].high)
						continue;

					if (hp->pmh_pte[j].low & PG_WIRED) {
						printf("Clearing wired user entry! h %x l %x\n", hp->pmh_pte[j].high, hp->pmh_pte[j].low);
						panic("pmap_alloc_tlbpid: wired");
					}
					pmap_remove_pv(q_pmap,
						hp->pmh_pte[j].high & PG_FRAME,
						hp->pmh_pte[j].low & PG_FRAME);
					hp->pmh_pte[j].high = 0;
					q_pmap->pm_stats.resident_count--;
				}
			}
		}
		q_pmap->pm_tlbpid = -1;
		MachTLBFlushPID(id);
#ifdef DEBUG
		remove_stats.pidflushes++;
#endif
		pmap->pm_tlbpid = id;
		goto done;
	}
	/* i holds the chosen mask; find its first clear bit */
	while (i & 1) {
		i >>= 1;
		id++;
	}
	whichpids[id >> 5] |= 1 << (id & 0x1F);
	pmap->pm_tlbpid = id;
done:
	/*
	 * Map in new TLB cache.
	 */
	if (pmap == cur_pmap)
		return (id);
	cur_pmap = pmap;
	/* rewrite the wired TLB slots that map this pmap's hash pages */
	for (i = 0; i < PMAP_HASH_UPAGES; i++) {
		MachTLBWriteIndexed(i + UPAGES,
			(PMAP_HASH_UADDR + (i << PGSHIFT)) |
			(id << VMMACH_TLB_PID_SHIFT),
			pmap->pm_hash_ptes[i]);
	}
	return (id);
}

/*
 * Remove a physical to virtual address translation.
 */
void
pmap_remove_pv(pmap, va, pa)
	pmap_t pmap;
	vm_offset_t va, pa;
{
	register pv_entry_t pv, npv;
	int s;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_remove_pv(%x, %x, %x)\n", pmap, va, pa);
#endif
	/*
	 * Remove page from the PV table (raise IPL since we
	 * may be called at interrupt time).
	 */
	if (!IS_VM_PHYSADDR(pa))
		return;
	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * If it is the first entry on the list, it is actually
	 * in the header and we must copy the following entry up
	 * to the header.  Otherwise we must search the list for
	 * the entry.  In either case we free the now unused entry.
	 */
	if (pmap == pv->pv_pmap && va == pv->pv_va) {
		npv = pv->pv_next;
		if (npv) {
			/* promote the second entry into the header */
			*pv = *npv;
			free((caddr_t)npv, M_VMPVENT);
		} else
			pv->pv_pmap = NULL;
#ifdef DEBUG
		remove_stats.pvfirst++;
#endif
	} else {
		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
#ifdef DEBUG
			remove_stats.pvsearch++;
#endif
			if (pmap == npv->pv_pmap && va == npv->pv_va)
				goto fnd;
		}
#ifdef DIAGNOSTIC
		printf("pmap_remove_pv(%x, %x, %x) not found\n", pmap, va, pa);
		panic("pmap_remove_pv");
#endif
		/*
		 * NOTE(review): without DIAGNOSTIC, a missing entry falls
		 * through here with npv == NULL and the dereference below
		 * would fault — confirm callers never remove an unmapped
		 * translation.
		 */
	fnd:
		pv->pv_next = npv->pv_next;
		free((caddr_t)npv, M_VMPVENT);
	}
	splx(s);
}

#ifdef DEBUG
/* Dump every valid entry of a user pmap's hash table (debug aid). */
pmap_print(pmap)
	pmap_t pmap;
{
	register pmap_hash_t hp;
	register int i, j;

	printf("\tpmap_print(%x)\n", pmap);

	if (pmap->pm_hash == zero_pmap_hash) {
		printf("pm_hash == zero\n");
		return;
	}
	if (pmap->pm_hash == (pmap_hash_t)0) {
		printf("pm_hash == kernel\n");
		return;
	}
	hp = pmap->pm_hash;
	for (i = 0; i < PMAP_HASH_NUM_ENTRIES; i++, hp++) {
		for (j = 0; j < 2; j++) {
			if (!hp->pmh_pte[j].high)
				continue;
			printf("%d: hi %x low %x\n", i, hp->pmh_pte[j].high, hp->pmh_pte[j].low);
		}
	}
}
#endif