/*
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * The CMU software License Agreement specifies the terms and conditions
 * for use and redistribution.
 *
 * This version by William Jolitz for UUNET Technologies, Inc.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves. This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 * Derived from: hp300/@(#)pmap.c  7.1 (Berkeley) 12/5/90
 *      @(#)pmap.c      7.3     04/17/91
 */

/*
 * Reno i386 version, from Mike Hibler's hp300 version.
 */

/*
 *      Manages physical address maps.
 *
 *      In addition to hardware address maps, this
 *      module is called upon to provide software-use-only
 *      maps which may or may not be stored in the same
 *      form as hardware maps.  These pseudo-maps are
 *      used to store intermediate results from copy
 *      operations to and from address spaces.
 *
 *      Since the information managed by this module is
 *      also stored by the logical address mapping module,
 *      this module may throw away valid virtual-to-physical
 *      mappings at almost any time.  However, invalidations
 *      of virtual-to-physical mappings must be done as
 *      requested.
 *
 *      In order to cope with hardware architectures which
 *      make virtual-to-physical map invalidates expensive,
 *      this module may delay invalidate or reduced protection
 *      operations until such time as they are actually
 *      necessary.  This module is given full information as
 *      to which processors are currently using which maps,
 *      and to when physical maps must be made correct.
 */

#include "param.h"
#include "../vm/vm_param.h"
#include "user.h"
#include "proc.h"
#include "lock.h"
#include "malloc.h"

#include "../vm/pmap.h"
#include "../vm/vm_map.h"
#include "../vm/vm_kern.h"
#include "../vm/vm_prot.h"
#include "../vm/vm_page.h"
#include "../vm/vm_pageout.h"

#include "machine/isa.h"
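/*
 * A note on the recursive map mentioned above (illustrative only):
 * on the i386, a linear address breaks down into a 10-bit page
 * directory index, a 10-bit page table index, and a 12-bit offset.
 * For example, va 0xFE012345 selects directory entry 0x3F8
 * (0xFE012345 >> 22), page table entry 0x012 ((0xFE012345 >> 12) & 0x3FF),
 * and byte offset 0x345.  By installing one directory entry (PTDPTDI)
 * that points back at the page directory itself, all page tables of
 * the current address space appear as one contiguous 4MB window of
 * kernel virtual memory, which is what pmap_pde()/pmap_pte() below
 * rely on.
 */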
/*
 * Allocate various and sundry SYSMAPs used in the days of old VM
 * and not yet converted.  XXX.
 */
#define BSDVM_COMPAT    1

#ifdef DEBUG
struct {
        int kernel;     /* entering kernel mapping */
        int user;       /* entering user mapping */
        int ptpneeded;  /* needed to allocate a PT page */
        int pwchange;   /* no mapping change, just wiring or protection */
        int wchange;    /* no mapping change, just wiring */
        int mchange;    /* was mapped but mapping to different page */
        int managed;    /* a managed page */
        int firstpv;    /* first mapping for this PA */
        int secondpv;   /* second mapping for this PA */
        int ci;         /* cache inhibited */
        int unmanaged;  /* not a managed page */
        int flushes;    /* cache flushes */
} enter_stats;
struct {
        int calls;
        int removes;
        int pvfirst;
        int pvsearch;
        int ptinvalid;
        int uflushes;
        int sflushes;
} remove_stats;

int debugmap = 0;
int pmapdebug = 0 /* 0xffff */;
#define PDB_FOLLOW      0x0001
#define PDB_INIT        0x0002
#define PDB_ENTER       0x0004
#define PDB_REMOVE      0x0008
#define PDB_CREATE      0x0010
#define PDB_PTPAGE      0x0020
#define PDB_CACHE       0x0040
#define PDB_BITS        0x0080
#define PDB_COLLECT     0x0100
#define PDB_PROTECT     0x0200
#define PDB_PDRTAB      0x0400
#define PDB_PARANOIA    0x2000
#define PDB_WIRING      0x4000
#define PDB_PVDUMP      0x8000

int pmapvacflush = 0;
#define PVF_ENTER       0x01
#define PVF_REMOVE      0x02
#define PVF_PROTECT     0x04
#define PVF_TOTAL       0x80
#endif

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define pmap_pde(m, v)  (&((m)->pm_pdir[((vm_offset_t)(v) >> PD_SHIFT)&1023]))

#define pmap_pte_pa(pte)        (*(int *)(pte) & PG_FRAME)

#define pmap_pde_v(pte)         ((pte)->pd_v)
#define pmap_pte_w(pte)         ((pte)->pg_w)
/* #define pmap_pte_ci(pte)     ((pte)->pg_ci) */
#define pmap_pte_m(pte)         ((pte)->pg_m)
#define pmap_pte_u(pte)         ((pte)->pg_u)
#define pmap_pte_v(pte)         ((pte)->pg_v)
#define pmap_pte_set_w(pte, v)          ((pte)->pg_w = (v))
#define pmap_pte_set_prot(pte, v)       ((pte)->pg_prot = (v))

/*
 * Given a map and a machine independent protection code,
 * convert to an i386 protection code.
 */
#define pte_prot(m, p)  (protection_codes[p])
int     protection_codes[8];

struct pmap     kernel_pmap_store;
pmap_t          kernel_pmap;

vm_offset_t     avail_start;    /* PA of first available physical page */
vm_offset_t     avail_end;      /* PA of last available physical page */
vm_size_t       mem_size;       /* memory size in bytes */
vm_offset_t     virtual_avail;  /* VA of first avail page (after kernel bss)*/
vm_offset_t     virtual_end;    /* VA of last avail page (end of kernel AS) */
vm_offset_t     vm_first_phys;  /* PA of first managed page */
vm_offset_t     vm_last_phys;   /* PA just past last managed page */
int             i386pagesperpage;       /* PAGE_SIZE / I386_PAGE_SIZE */
boolean_t       pmap_initialized = FALSE;       /* Has pmap_init completed? */
char            *pmap_attributes;       /* reference and modify bits */

boolean_t       pmap_testbit();
void            pmap_clear_modify();

#if BSDVM_COMPAT
#include "msgbuf.h"

/*
 * All those kernel PT submaps that BSD is so fond of
 */
struct pte      *CMAP1, *CMAP2, *mmap;
caddr_t         CADDR1, CADDR2, vmmap;
struct pte      *msgbufmap;
struct msgbuf   *msgbufp;
#endif
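/*
 * The submaps above pair a kernel virtual address (CADDR1, CADDR2,
 * vmmap, msgbufp) with a pointer to the PTE that maps it (CMAP1,
 * CMAP2, mmap, msgbufmap).  They are carved out of virtual_avail in
 * pmap_bootstrap() below.  A sketch of the usual pattern (assumed,
 * not taken from this file): to touch an arbitrary physical page,
 * other kernel code would do something like
 *
 *      *(int *)CMAP1 = pa | PG_V | PG_KW;
 *      load_cr3(rcr3());       (flush the stale translation)
 *      ... access the page through CADDR1 ...
 */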
/*
 * Bootstrap the system enough to run with virtual memory.
 * Map the kernel's code and data, and allocate the system page table.
 *
 * On the I386 this is called after mapping has already been enabled
 * and just syncs the pmap module with what has already been done.
 * [We can't call it easily with mapping off since the kernel is not
 * mapped with PA == VA, hence we would have to relocate every address
 * from the linked base (virtual) address 0xFE000000 to the actual
 * (physical) address starting relative to 0]
 */
struct pte *pmap_pte();

extern vm_offset_t      atdevbase;
int
pmap_bootstrap(firstaddr, loadaddr)
        vm_offset_t firstaddr;
        vm_offset_t loadaddr;
{
#if BSDVM_COMPAT
        vm_offset_t va;
        struct pte *pte;
#endif
        extern vm_offset_t maxmem, physmem;
        extern int IdlePTD;

        avail_start = firstaddr;
        avail_end = maxmem << PG_SHIFT;

        /* XXX: allow for msgbuf */
        avail_end -= i386_round_page(sizeof(struct msgbuf));

        mem_size = physmem << PG_SHIFT;
        virtual_avail = atdevbase + 0x100000 - 0xa0000 + 10*NBPG;
        virtual_end = VM_MAX_KERNEL_ADDRESS;
        printf("avail [%x %x] virtual [%x %x]\n",
            avail_start, avail_end, virtual_avail, virtual_end);
        printf("cr3 %x", rcr3());
        i386pagesperpage = PAGE_SIZE / I386_PAGE_SIZE;

        /*
         * Initialize protection array.
         */
        i386_protection_init();

        /*
         * The kernel's pmap is statically allocated so we don't
         * have to use pmap_create, which is unlikely to work
         * correctly at this part of the boot sequence.
         */
        kernel_pmap = &kernel_pmap_store;

#ifdef notdef
        /*
         * Create Kernel page directory table and page maps.
         * [ currently done in locore.  i have wild and crazy ideas -wfj ]
         */
        bzero(firstaddr, 4*NBPG);
        kernel_pmap->pm_pdir = firstaddr + VM_MIN_KERNEL_ADDRESS;
        kernel_pmap->pm_ptab = firstaddr + VM_MIN_KERNEL_ADDRESS + NBPG;

        firstaddr += NBPG;
        for (x = i386_btod(VM_MIN_KERNEL_ADDRESS);
            x < i386_btod(VM_MIN_KERNEL_ADDRESS)+3; x++) {
                struct pde *pde;
                pde = kernel_pmap->pm_pdir + x;
                *(int *)pde = firstaddr + x*NBPG | PG_V | PG_KW;
        }
#else
        kernel_pmap->pm_pdir = (pd_entry_t *)(0xfe000000 + IdlePTD);
#endif

        simple_lock_init(&kernel_pmap->pm_lock);
        kernel_pmap->pm_count = 1;

#if BSDVM_COMPAT
        /*
         * Allocate all the submaps we need
         */
#define SYSMAP(c, p, v, n)      \
        v = (c)va; va += ((n)*I386_PAGE_SIZE); p = pte; pte += (n);

        va = virtual_avail;
        pte = pmap_pte(kernel_pmap, va);

        SYSMAP(caddr_t,         CMAP1,          CADDR1,         1)
        SYSMAP(caddr_t,         CMAP2,          CADDR2,         1)
        SYSMAP(caddr_t,         mmap,           vmmap,          1)
        SYSMAP(struct msgbuf *, msgbufmap,      msgbufp,        1)
        virtual_avail = va;
#endif

        /**(int *)PTD = 0;
        load_cr3(rcr3());*/

        return (firstaddr);
}
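/*
 * For reference, one expansion of the SYSMAP() macro used above
 * (illustrative): SYSMAP(caddr_t, CMAP1, CADDR1, 1) becomes
 *
 *      CADDR1 = (caddr_t)va; va += 1*I386_PAGE_SIZE;
 *      CMAP1 = pte; pte += 1;
 *
 * i.e. each submap consumes n pages of kernel virtual space and
 * remembers the address of the PTEs that back them.
 */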
/*
 *      Initialize the pmap module.
 *      Called by vm_init, to initialize any structures that the pmap
 *      system needs to map virtual memory.
 */
void
pmap_init(phys_start, phys_end)
        vm_offset_t     phys_start, phys_end;
{
        vm_offset_t     addr, addr2;
        vm_size_t       npg, s;
        int             rv;
        extern int KPTphys;

#ifdef DEBUG
        if (pmapdebug & PDB_FOLLOW)
                printf("pmap_init(%x, %x)\n", phys_start, phys_end);
#endif
        /*
         * Now that kernel map has been allocated, we can mark as
         * unavailable regions which we have mapped in locore.
         */
        addr = atdevbase;
        (void) vm_map_find(kernel_map, VM_OBJECT_NULL, (vm_offset_t) 0,
                           &addr, (0x100000-0xa0000), FALSE);

        addr = (vm_offset_t) 0xfe000000+KPTphys/* *NBPG */;
        vm_object_reference(kernel_object);
        (void) vm_map_find(kernel_map, kernel_object, addr,
                           &addr, 2*NBPG, FALSE);

        /*
         * Allocate memory for random pmap data structures.  Includes the
         * pv_head_table and pmap_attributes.
         */
        npg = atop(phys_end - phys_start);
        s = (vm_size_t) (sizeof(struct pv_entry) * npg + npg);
        s = round_page(s);
        addr = (vm_offset_t) kmem_alloc(kernel_map, s);
        pv_table = (pv_entry_t) addr;
        addr += sizeof(struct pv_entry) * npg;
        pmap_attributes = (char *) addr;
#ifdef DEBUG
        if (pmapdebug & PDB_INIT)
                printf("pmap_init: %x bytes (%x pgs): tbl %x attr %x\n",
                       s, npg, pv_table, pmap_attributes);
#endif

        /*
         * Now it is safe to enable pv_table recording.
         */
        vm_first_phys = phys_start;
        vm_last_phys = phys_end;
        pmap_initialized = TRUE;
}

/*
 *      Used to map a range of physical addresses into kernel
 *      virtual address space.
 *
 *      For now, VM is already on, we only need to map the
 *      specified memory.
 */
vm_offset_t
pmap_map(virt, start, end, prot)
        vm_offset_t     virt;
        vm_offset_t     start;
        vm_offset_t     end;
        int             prot;
{
#ifdef DEBUG
        if (pmapdebug & PDB_FOLLOW)
                printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
#endif
        while (start < end) {
                pmap_enter(kernel_pmap, virt, start, prot, FALSE);
                virt += PAGE_SIZE;
                start += PAGE_SIZE;
        }
        return(virt);
}

/*
 *      Create and return a physical map.
 *
 *      If the size specified for the map
 *      is zero, the map is an actual physical
 *      map, and may be referenced by the
 *      hardware.
 *
 *      If the size specified is non-zero,
 *      the map will be used in software only, and
 *      is bounded by that size.
 *
 * [ just allocate a ptd and mark it uninitialized -- should we track
 *   with a table which process has which ptd? -wfj ]
 */
pmap_t
pmap_create(size)
        vm_size_t       size;
{
        register pmap_t pmap;

#ifdef DEBUG
        if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
                pg("pmap_create(%x)", size);
#endif
        /*
         * Software use map does not need a pmap
         */
        if (size)
                return(PMAP_NULL);

        pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
        if (pmap == PMAP_NULL)
                panic("pmap_create: cannot allocate a pmap");

        /*
         * No need to allocate page table space yet but we do need a
         * valid page directory table.
         */
        pmap->pm_pdir = (pd_entry_t *) kmem_alloc(kernel_map, NBPG);
        bcopy(PTD+KPTDI_FIRST, pmap->pm_pdir+KPTDI_FIRST,
            (KPTDI_LAST-KPTDI_FIRST+1)*4);
        *(int *)(pmap->pm_pdir+PTDPTDI) =
            (int)pmap_extract(kernel_pmap, pmap->pm_pdir) | PG_V | PG_URKW;

        pmap->pm_dref = 0;
        pmap->pm_count = 1;
        simple_lock_init(&pmap->pm_lock);
        pmap->pm_stats.resident_count = 0;
        pmap->pm_stats.wired_count = 0;
        pmap->pm_ptpages = 0;
        return(pmap);
}
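/*
 * Note on the directory set up by pmap_create() above: the kernel's
 * directory entries (KPTDI_FIRST..KPTDI_LAST) are copied so every
 * address space shares the kernel's page tables, and the PTDPTDI
 * slot is pointed at the physical page of the new directory itself,
 * which is what lets vtopte()/avtopte() reach this pmap's page
 * tables later (see pmap_pte() below).
 */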
/*
 *      Retire the given physical map from service.
 *      Should only be called if the map contains
 *      no valid mappings.
 */
void
pmap_destroy(pmap)
        register pmap_t pmap;
{
        int count;

#ifdef DEBUG
        if (pmapdebug & PDB_FOLLOW)
                printf("pmap_destroy(%x)", pmap);
#endif
        if (pmap == PMAP_NULL)
                return;

        simple_lock(&pmap->pm_lock);
        /*count = --pmap->pm_count;*/
        simple_unlock(&pmap->pm_lock);
        /*if (count)
                return;*/

        kmem_free(kernel_map, (vm_offset_t)pmap->pm_pdir, NBPG);
        free((caddr_t)pmap, M_VMPMAP);
}

/*
 *      Add a reference to the specified pmap.
 */
void
pmap_reference(pmap)
        pmap_t  pmap;
{
#ifdef DEBUG
        if (pmapdebug & PDB_FOLLOW)
                printf("pmap_reference(%x)", pmap);
#endif
        if (pmap != PMAP_NULL) {
                simple_lock(&pmap->pm_lock);
                pmap->pm_count++;
                simple_unlock(&pmap->pm_lock);
        }
}

/*
 *      Remove the given range of addresses from the specified map.
 *
 *      It is assumed that the start and end are properly
 *      rounded to the page size.
 */
void
pmap_remove(pmap, sva, eva)
        register pmap_t pmap;
        vm_offset_t sva, eva;
{
        register vm_offset_t pa, va;
        register pt_entry_t *pte;
        register pv_entry_t pv, npv;
        register int ix;
        pmap_t ptpmap;
        int *pde, s, bits;
        boolean_t firstpage = TRUE;
        boolean_t flushcache = FALSE;
#ifdef DEBUG
        pt_entry_t opte;

        if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
                pg("pmap_remove(%x, %x, %x)", pmap, sva, eva);
#endif

        if (pmap == PMAP_NULL)
                return;

#ifdef DEBUG
        remove_stats.calls++;
#endif
        for (va = sva; va < eva; va += PAGE_SIZE) {
                /*
                 * Weed out invalid mappings.
                 * Note: we assume that the page directory table is
                 * always allocated, and in kernel virtual.
                 */
                if (!pmap_pde_v(pmap_pde(pmap, va)))
                        continue;

                pte = pmap_pte(pmap, va);
                if (pte == 0)
                        continue;
                pa = pmap_pte_pa(pte);
                if (pa == 0)
                        continue;
#ifdef DEBUG
                opte = *pte;
                remove_stats.removes++;
#endif
                /*
                 * Update statistics
                 */
                if (pmap_pte_w(pte))
                        pmap->pm_stats.wired_count--;
                pmap->pm_stats.resident_count--;

                /*
                 * Invalidate the PTEs.
                 * XXX: should cluster them up and invalidate as many
                 * as possible at once.
                 */
#ifdef DEBUG
                if (pmapdebug & PDB_REMOVE)
                        printf("remove: inv %x ptes at %x(%x) ",
                               i386pagesperpage, pte, *(int *)pte);
#endif
                bits = ix = 0;
                do {
                        bits |= *(int *)pte & (PG_U|PG_M);
                        *(int *)pte++ = 0;
                        /*TBIS(va + ix * I386_PAGE_SIZE);*/
                } while (++ix != i386pagesperpage);
                if (pmap == u.u_procp->p_map->pmap)
                        pmap_activate(pmap, (struct pcb *)u.u_procp->p_addr);
                load_cr3(u.u_pcb.pcb_ptd);

#ifdef needednotdone
reduce wiring count on page table pages as references drop
#endif

                /*
                 * Remove from the PV table (raise IPL since we
                 * may be called at interrupt time).
                 */
                if (pa < vm_first_phys || pa >= vm_last_phys)
                        continue;
                pv = pa_to_pvh(pa);
                s = splimp();
                /*
                 * If it is the first entry on the list, it is actually
                 * in the header and we must copy the following entry up
                 * to the header.  Otherwise we must search the list for
                 * the entry.  In either case we free the now unused entry.
                 */
                if (pmap == pv->pv_pmap && va == pv->pv_va) {
                        npv = pv->pv_next;
                        if (npv) {
                                *pv = *npv;
                                free((caddr_t)npv, M_VMPVENT);
                        } else
                                pv->pv_pmap = PMAP_NULL;
#ifdef DEBUG
                        remove_stats.pvfirst++;
#endif
                } else {
                        for (npv = pv->pv_next; npv; npv = npv->pv_next) {
#ifdef DEBUG
                                remove_stats.pvsearch++;
#endif
                                if (pmap == npv->pv_pmap && va == npv->pv_va)
                                        break;
                                pv = npv;
                        }
#ifdef DEBUG
                        if (npv == PV_ENTRY_NULL)
                                panic("pmap_remove: PA not in pv_tab");
#endif
                        pv->pv_next = npv->pv_next;
                        free((caddr_t)npv, M_VMPVENT);
                        pv = pa_to_pvh(pa);
                }

#ifdef notdef
[tally number of pagetable pages, if sharing of ptpages adjust here]
#endif
                /*
                 * Update saved attributes for managed page
                 */
                pmap_attributes[pa_index(pa)] |= bits;
                splx(s);
        }
#ifdef notdef
[cache and tlb flushing, if needed]
#endif
}
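/*
 * A note on the pv_table handling above and in pmap_enter() below:
 * each managed physical page owns one struct pv_entry header in
 * pv_table.  The first virtual mapping of the page is recorded in
 * that header itself (an empty header has pv_pmap == PMAP_NULL);
 * any additional mappings are allocated with malloc(M_VMPVENT) and
 * chained through pv_next.  Removing the first mapping therefore
 * copies the next entry up into the header rather than unlinking it.
 */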
/*
 *      Routine:        pmap_remove_all
 *      Function:
 *              Removes this physical page from
 *              all physical maps in which it resides.
 *              Reflects back modify bits to the pager.
 */
void
pmap_remove_all(pa)
        vm_offset_t pa;
{
        register pv_entry_t pv;
        int s;

#ifdef DEBUG
        if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
                printf("pmap_remove_all(%x)", pa);
        /*pmap_pvdump(pa);*/
#endif
        /*
         * Not one of ours
         */
        if (pa < vm_first_phys || pa >= vm_last_phys)
                return;

        pv = pa_to_pvh(pa);
        s = splimp();
        /*
         * Do it the easy way for now
         */
        while (pv->pv_pmap != PMAP_NULL) {
#ifdef DEBUG
                if (!pmap_pde_v(pmap_pde(pv->pv_pmap, pv->pv_va)) ||
                    pmap_pte_pa(pmap_pte(pv->pv_pmap, pv->pv_va)) != pa)
                        panic("pmap_remove_all: bad mapping");
#endif
                pmap_remove(pv->pv_pmap, pv->pv_va, pv->pv_va + PAGE_SIZE);
        }
        splx(s);
}

/*
 *      Routine:        pmap_copy_on_write
 *      Function:
 *              Remove write privileges from all
 *              physical maps for this physical page.
 */
void
pmap_copy_on_write(pa)
        vm_offset_t pa;
{
#ifdef DEBUG
        if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
                printf("pmap_copy_on_write(%x)", pa);
#endif
        pmap_changebit(pa, PG_RO, TRUE);
}

/*
 *      Set the physical protection on the
 *      specified range of this map as requested.
 */
void
pmap_protect(pmap, sva, eva, prot)
        register pmap_t pmap;
        vm_offset_t     sva, eva;
        vm_prot_t       prot;
{
        register pt_entry_t *pte;
        register vm_offset_t va;
        register int ix;
        int i386prot;
        boolean_t firstpage = TRUE;

#ifdef DEBUG
        if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
                printf("pmap_protect(%x, %x, %x, %x)", pmap, sva, eva, prot);
#endif
        if (pmap == PMAP_NULL)
                return;

        if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
                pmap_remove(pmap, sva, eva);
                return;
        }
        if (prot & VM_PROT_WRITE)
                return;

        pte = pmap_pte(pmap, sva);
        if (!pte)
                return;
        for (va = sva; va < eva; va += PAGE_SIZE) {
                /*
                 * Page table page is not allocated.
                 * Skip it, we don't want to force allocation
                 * of unnecessary PTE pages just to set the protection.
                 */
                if (!pmap_pde_v(pmap_pde(pmap, va))) {
                        /* XXX: avoid address wrap around */
                        if (va >= i386_trunc_pdr((vm_offset_t)-1))
                                break;
                        va = i386_round_pdr(va + PAGE_SIZE) - PAGE_SIZE;
                        pte = pmap_pte(pmap, va);
                        pte += i386pagesperpage;
                        continue;
                }
                if (!pte)
                        return;
                /*
                 * Page not valid.  Again, skip it.
                 * Should we do this?  Or set protection anyway?
                 */
                if (!pmap_pte_v(pte)) {
                        pte += i386pagesperpage;
                        continue;
                }
                ix = 0;
                i386prot = pte_prot(pmap, prot);
                if (va < UPT_MAX_ADDRESS)
                        i386prot |= 2 /*PG_u*/;
                do {
                        /* clear VAC here if PG_RO? */
                        pmap_pte_set_prot(pte++, i386prot);
                        /*TBIS(va + ix * I386_PAGE_SIZE);*/
                } while (++ix != i386pagesperpage);
        }
        if (pmap == u.u_procp->p_map->pmap)
                pmap_activate(pmap, (struct pcb *)u.u_procp->p_addr);
}

/*
 *      Insert the given physical page (p) at
 *      the specified virtual address (v) in the
 *      target physical map with the protection requested.
 *
 *      If specified, the page will be wired down, meaning
 *      that the related pte can not be reclaimed.
 *
 *      NB:  This is the only routine which MAY NOT lazy-evaluate
 *      or lose information.  That is, this routine must actually
 *      insert this page into the given map NOW.
 */
void
pmap_enter(pmap, va, pa, prot, wired)
        register pmap_t pmap;
        vm_offset_t va;
        register vm_offset_t pa;
        vm_prot_t prot;
        boolean_t wired;
{
        register pt_entry_t *pte;
        register int npte, ix;
        vm_offset_t opa;
        boolean_t cacheable = TRUE;
        boolean_t checkpv = TRUE;

#ifdef DEBUG
        if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
                printf("pmap_enter(%x, %x, %x, %x, %x)",
                       pmap, va, pa, prot, wired);
#endif
        if (pmap == PMAP_NULL)
                return;

        if (va > VM_MAX_KERNEL_ADDRESS)
                panic("pmap_enter: toobig");
        /* also, should not muck with PTD va! */

#ifdef DEBUG
        if (pmap == kernel_pmap)
                enter_stats.kernel++;
        else
                enter_stats.user++;
#endif

        /*
         * Page Directory table entry not valid, we need a new PT page
         */
        if (!pmap_pde_v(pmap_pde(pmap, va))) {
                pg("ptdi %x", pmap->pm_pdir[PTDPTDI]);
        }

        pte = pmap_pte(pmap, va);
        opa = pmap_pte_pa(pte);
#ifdef DEBUG
        if (pmapdebug & PDB_ENTER)
                printf("enter: pte %x, *pte %x ", pte, *(int *)pte);
#endif

        /*
         * Mapping has not changed, must be protection or wiring change.
         */
        if (opa == pa) {
#ifdef DEBUG
                enter_stats.pwchange++;
#endif
                /*
                 * Wiring change, just update stats.
                 * We don't worry about wiring PT pages as they remain
                 * resident as long as there are valid mappings in them.
                 * Hence, if a user page is wired, the PT page will be also.
                 */
                if ((wired && !pmap_pte_w(pte)) || (!wired && pmap_pte_w(pte))) {
#ifdef DEBUG
                        if (pmapdebug & PDB_ENTER)
                                pg("enter: wiring change -> %x ", wired);
#endif
                        if (wired)
                                pmap->pm_stats.wired_count++;
                        else
                                pmap->pm_stats.wired_count--;
#ifdef DEBUG
                        enter_stats.wchange++;
#endif
                }
                goto validate;
        }

        /*
         * Mapping has changed, invalidate old range and fall through to
         * handle validating new mapping.
         */
        if (opa) {
#ifdef DEBUG
                if (pmapdebug & PDB_ENTER)
                        printf("enter: removing old mapping %x pa %x ", va, opa);
#endif
                pmap_remove(pmap, va, va + PAGE_SIZE);
#ifdef DEBUG
                enter_stats.mchange++;
#endif
        }

        /*
         * Enter on the PV list if part of our managed memory
         * Note that we raise IPL while manipulating pv_table
         * since pmap_enter can be called at interrupt time.
         */
        if (pa >= vm_first_phys && pa < vm_last_phys) {
                register pv_entry_t pv, npv;
                int s;

#ifdef DEBUG
                enter_stats.managed++;
#endif
                pv = pa_to_pvh(pa);
                s = splimp();
#ifdef DEBUG
                if (pmapdebug & PDB_ENTER)
                        printf("enter: pv at %x: %x/%x/%x ",
                               pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
#endif
                /*
                 * No entries yet, use header as the first entry
                 */
                if (pv->pv_pmap == PMAP_NULL) {
#ifdef DEBUG
                        enter_stats.firstpv++;
#endif
                        pv->pv_va = va;
                        pv->pv_pmap = pmap;
                        pv->pv_next = PV_ENTRY_NULL;
                        pv->pv_flags = 0;
                }
                /*
                 * There is at least one other VA mapping this page.
                 * Place this entry after the header.
                 */
                else {
                        /*printf("second time: ");*/
#ifdef DEBUG
                        for (npv = pv; npv; npv = npv->pv_next)
                                if (pmap == npv->pv_pmap && va == npv->pv_va)
                                        panic("pmap_enter: already in pv_tab");
#endif
                        npv = (pv_entry_t)
                                malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
                        npv->pv_va = va;
                        npv->pv_pmap = pmap;
                        npv->pv_next = pv->pv_next;
                        pv->pv_next = npv;
#ifdef DEBUG
                        if (!npv->pv_next)
                                enter_stats.secondpv++;
#endif
                }
                splx(s);
        }
        /*
         * Assumption: if it is not part of our managed memory
         * then it must be device memory which may be volatile.
         */
        if (pmap_initialized) {
                checkpv = cacheable = FALSE;
#ifdef DEBUG
                enter_stats.unmanaged++;
#endif
        }

        /*
         * Increment counters
         */
        pmap->pm_stats.resident_count++;
        if (wired)
                pmap->pm_stats.wired_count++;

validate:
        /*
         * Now validate mapping with desired protection/wiring.
         * Assume uniform modified and referenced status for all
         * I386 pages in a MACH page.
         */
        npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
        npte |= (*(int *)pte & (PG_M|PG_U));
        if (wired)
                npte |= PG_W;
        if (va < UPT_MIN_ADDRESS)
                npte |= PG_u;
        else if (va < UPT_MAX_ADDRESS)
                npte |= PG_u | PG_RW;
#ifdef DEBUG
        if (pmapdebug & PDB_ENTER)
                printf("enter: new pte value %x ", npte);
#endif
        ix = 0;
        do {
                *(int *)pte++ = npte;
                /*TBIS(va);*/
                npte += I386_PAGE_SIZE;
                va += I386_PAGE_SIZE;
        } while (++ix != i386pagesperpage);
        pte--;
#ifdef DEBUGx
cache, tlb flushes
#endif
        /*pads(pmap);*/
        load_cr3(u.u_pcb.pcb_ptd);
}
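/*
 * Example of the PTE constructed in the validate step above
 * (illustrative): wiring a page at physical address 0x00123000 with
 * VM_PROT_READ|VM_PROT_WRITE at a kernel virtual address yields
 *
 *      npte = (0x00123000 & PG_FRAME) | PG_RW | PG_V | PG_W
 *
 * plus any PG_U/PG_M bits already recorded in the old PTE, while a
 * user address below UPT_MIN_ADDRESS would additionally get PG_u.
 */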
/*
 *      Routine:        pmap_change_wiring
 *      Function:       Change the wiring attribute for a map/virtual-address
 *                      pair.
 *      In/out conditions:
 *                      The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap, va, wired)
        register pmap_t pmap;
        vm_offset_t     va;
        boolean_t       wired;
{
        register pt_entry_t *pte;
        register int ix;

#ifdef DEBUG
        if (pmapdebug & PDB_FOLLOW)
                printf("pmap_change_wiring(%x, %x, %x)", pmap, va, wired);
#endif
        if (pmap == PMAP_NULL)
                return;

        pte = pmap_pte(pmap, va);
#ifdef DEBUG
        /*
         * Page table page is not allocated.
         * Should this ever happen?  Ignore it for now,
         * we don't want to force allocation of unnecessary PTE pages.
         */
        if (!pmap_pde_v(pmap_pde(pmap, va))) {
                if (pmapdebug & PDB_PARANOIA)
                        pg("pmap_change_wiring: invalid PDE for %x ", va);
                return;
        }
        /*
         * Page not valid.  Should this ever happen?
         * Just continue and change wiring anyway.
         */
        if (!pmap_pte_v(pte)) {
                if (pmapdebug & PDB_PARANOIA)
                        pg("pmap_change_wiring: invalid PTE for %x ", va);
        }
#endif
        if ((wired && !pmap_pte_w(pte)) || (!wired && pmap_pte_w(pte))) {
                if (wired)
                        pmap->pm_stats.wired_count++;
                else
                        pmap->pm_stats.wired_count--;
        }
        /*
         * Wiring is not a hardware characteristic so there is no need
         * to invalidate TLB.
         */
        ix = 0;
        do {
                pmap_pte_set_w(pte++, wired);
        } while (++ix != i386pagesperpage);
}

/*
 *      Routine:        pmap_pte
 *      Function:
 *              Extract the page table entry associated
 *              with the given map/virtual_address pair.
 * [ what about induced faults -wfj]
 */

struct pte *
pmap_pte(pmap, va)
        register pmap_t pmap;
        vm_offset_t va;
{

#ifdef DEBUGx
        if (pmapdebug & PDB_FOLLOW)
                printf("pmap_pte(%x, %x) ->\n", pmap, va);
#endif
        if (pmap && pmap_pde_v(pmap_pde(pmap, va))) {

                /* are we current address space or kernel? */
                if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
                    || pmap == kernel_pmap)
                        return ((struct pte *) vtopte(va));

                /* otherwise, we are alternate address space */
                else {
                        if (pmap->pm_pdir[PTDPTDI].pd_pfnum
                            != APTDpde.pd_pfnum) {
                                APTDpde = pmap->pm_pdir[PTDPTDI];
                                load_cr3(u.u_pcb.pcb_ptd);
                        }
                        return((struct pte *) avtopte(va));
                }
        }
        return(0);
}
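/*
 * A note on the two cases in pmap_pte() above: when the pmap is the
 * one currently loaded (its PTDPTDI entry matches PTDpde) or is the
 * kernel pmap, its page tables are already visible through the
 * recursive window and vtopte(va) indexes them directly.  For any
 * other pmap, its directory is first installed in the alternate
 * recursive slot (APTDpde) and the TLB is flushed with load_cr3()
 * so that avtopte(va) sees the right page tables.
 */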
/*
 *      Routine:        pmap_extract
 *      Function:
 *              Extract the physical page address associated
 *              with the given map/virtual_address pair.
 */

vm_offset_t
pmap_extract(pmap, va)
        register pmap_t pmap;
        vm_offset_t va;
{
        register vm_offset_t pa;

#ifdef DEBUGx
        if (pmapdebug & PDB_FOLLOW)
                pg("pmap_extract(%x, %x) -> ", pmap, va);
#endif
        pa = 0;
        if (pmap && pmap_pde_v(pmap_pde(pmap, va))) {
                pa = *(int *) pmap_pte(pmap, va);
        }
        if (pa)
                pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
#ifdef DEBUGx
        if (pmapdebug & PDB_FOLLOW)
                printf("%x\n", pa);
#endif
        return(pa);
}

/*
 *      Copy the range specified by src_addr/len
 *      from the source map to the range dst_addr/len
 *      in the destination map.
 *
 *      This routine is only advisory and need not do anything.
 */
void
pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
        pmap_t          dst_pmap;
        pmap_t          src_pmap;
        vm_offset_t     dst_addr;
        vm_size_t       len;
        vm_offset_t     src_addr;
{
#ifdef DEBUG
        if (pmapdebug & PDB_FOLLOW)
                printf("pmap_copy(%x, %x, %x, %x, %x)",
                       dst_pmap, src_pmap, dst_addr, len, src_addr);
#endif
}

/*
 *      Require that all active physical maps contain no
 *      incorrect entries NOW.  [This update includes
 *      forcing updates of any address map caching.]
 *
 *      Generally used to insure that a thread about
 *      to run will see a semantically correct world.
 */
void
pmap_update()
{
#ifdef DEBUG
        if (pmapdebug & PDB_FOLLOW)
                printf("pmap_update()");
#endif
        load_cr3(u.u_pcb.pcb_ptd);
}

/*
 *      Routine:        pmap_collect
 *      Function:
 *              Garbage collects the physical map system for
 *              pages which are no longer used.
 *              Success need not be guaranteed -- that is, there
 *              may well be pages which are not referenced, but
 *              others may be collected.
 *      Usage:
 *              Called by the pageout daemon when pages are scarce.
 * [ needs to be written -wfj ]
 */
void
pmap_collect(pmap)
        pmap_t          pmap;
{
        register vm_offset_t pa;
        register pv_entry_t pv;
        register int *pte;
        vm_offset_t kpa;
        int s;

#ifdef DEBUG
        int *pde;
        int opmapdebug;
#endif
        printf("pmap_collect(%x) ", pmap);
        if (pmap != kernel_pmap)
                return;

}

/* [ macro again?, should I force u. into user map here? -wfj ] */
void
pmap_activate(pmap, pcbp)
        register pmap_t pmap;
        struct pcb *pcbp;
{
        int x;
#ifdef DEBUG
        if (pmapdebug & (PDB_FOLLOW|PDB_PDRTAB))
                pg("pmap_activate(%x, %x) ", pmap, pcbp);
#endif
        PMAP_ACTIVATE(pmap, pcbp);
        /*printf("pde ");
        for(x=0x3f6; x < 0x3fA; x++)
                printf("%x ", pmap->pm_pdir[x]);*/
        /*pads(pmap);*/
        /*pg(" pcb_cr3 %x", pcbp->pcb_cr3);*/
}

/*
 *      Routine:        pmap_kernel
 *      Function:
 *              Returns the physical map handle for the kernel.
 */
pmap_t
pmap_kernel()
{
        return (kernel_pmap);
}

/*
 *      pmap_zero_page zeros the specified (machine independent)
 *      page by mapping the page into virtual memory and using
 *      bzero to clear its contents, one machine dependent page
 *      at a time.
 */
pmap_zero_page(phys)
        register vm_offset_t    phys;
{
        register int ix;

#ifdef DEBUG
        if (pmapdebug & PDB_FOLLOW)
                printf("pmap_zero_page(%x)", phys);
#endif
        phys >>= PG_SHIFT;
        ix = 0;
        do {
                clearseg(phys++);
        } while (++ix != i386pagesperpage);
}

/*
 *      pmap_copy_page copies the specified (machine independent)
 *      page by mapping the page into virtual memory and using
 *      bcopy to copy the page, one machine dependent page at a
 *      time.
 */
pmap_copy_page(src, dst)
        register vm_offset_t    src, dst;
{
        register int ix;

#ifdef DEBUG
        if (pmapdebug & PDB_FOLLOW)
                printf("pmap_copy_page(%x, %x)", src, dst);
#endif
        src >>= PG_SHIFT;
        dst >>= PG_SHIFT;
        ix = 0;
        do {
                physcopyseg(src++, dst++);
        } while (++ix != i386pagesperpage);
}


/*
 *      Routine:        pmap_pageable
 *      Function:
 *              Make the specified pages (by pmap, offset)
 *              pageable (or not) as requested.
 *
 *              A page which is not pageable may not take
 *              a fault; therefore, its page table entry
 *              must remain valid for the duration.
 *
 *              This routine is merely advisory; pmap_enter
 *              will specify that these pages are to be wired
 *              down (or not) as appropriate.
 */
pmap_pageable(pmap, sva, eva, pageable)
        pmap_t          pmap;
        vm_offset_t     sva, eva;
        boolean_t       pageable;
{
#ifdef DEBUG
        if (pmapdebug & PDB_FOLLOW)
                printf("pmap_pageable(%x, %x, %x, %x)",
                       pmap, sva, eva, pageable);
#endif
        /*
         * If we are making a PT page pageable then all valid
         * mappings must be gone from that page.  Hence it should
         * be all zeros and there is no need to clean it.
         * Assumptions:
         *      - we are called with only one page at a time
         *      - PT pages have only one pv_table entry
         */
        if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
                register pv_entry_t pv;
                register vm_offset_t pa;

#ifdef DEBUG
                if ((pmapdebug & (PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE)
                        printf("pmap_pageable(%x, %x, %x, %x)",
                               pmap, sva, eva, pageable);
#endif
                /*if (!pmap_pde_v(pmap_pde(pmap, sva)))
                        return;*/
                if (pmap_pte(pmap, sva) == 0)
                        return;
                pa = pmap_pte_pa(pmap_pte(pmap, sva));
                if (pa < vm_first_phys || pa >= vm_last_phys)
                        return;
                pv = pa_to_pvh(pa);
                /*if (!ispt(pv->pv_va))
                        return;*/
#ifdef DEBUG
                if (pv->pv_va != sva || pv->pv_next) {
                        pg("pmap_pageable: bad PT page va %x next %x\n",
                           pv->pv_va, pv->pv_next);
                        return;
                }
#endif
                /*
                 * Mark it unmodified to avoid pageout
                 */
                pmap_clear_modify(pa);
#ifdef needsomethinglikethis
                if (pmapdebug & PDB_PTPAGE)
                        pg("pmap_pageable: PT page %x(%x) unmodified\n",
                           sva, *(int *)pmap_pte(pmap, sva));
                if (pmapdebug & PDB_WIRING)
                        pmap_check_wiring("pageable", sva);
#endif
        }
}

/*
 *      Clear the modify bits on the specified physical page.
 */

void
pmap_clear_modify(pa)
        vm_offset_t     pa;
{
#ifdef DEBUG
        if (pmapdebug & PDB_FOLLOW)
                printf("pmap_clear_modify(%x)", pa);
#endif
        pmap_changebit(pa, PG_M, FALSE);
}

/*
 *      pmap_clear_reference:
 *
 *      Clear the reference bit on the specified physical page.
 */

void
pmap_clear_reference(pa)
        vm_offset_t     pa;
{
#ifdef DEBUG
        if (pmapdebug & PDB_FOLLOW)
                printf("pmap_clear_reference(%x)", pa);
#endif
        pmap_changebit(pa, PG_U, FALSE);
}

/*
 *      pmap_is_referenced:
 *
 *      Return whether or not the specified physical page is referenced
 *      by any physical maps.
 */

boolean_t
pmap_is_referenced(pa)
        vm_offset_t     pa;
{
#ifdef DEBUG
        if (pmapdebug & PDB_FOLLOW) {
                boolean_t rv = pmap_testbit(pa, PG_U);
                printf("pmap_is_referenced(%x) -> %c", pa, "FT"[rv]);
                return(rv);
        }
#endif
        return(pmap_testbit(pa, PG_U));
}
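/*
 * Reference/modify tracking summary: the hardware sets PG_U and PG_M
 * in the PTEs; pmap_remove() folds those bits into pmap_attributes[]
 * before a mapping disappears.  pmap_testbit() therefore consults the
 * saved attributes first and then every live mapping on the pv list,
 * while pmap_changebit() (used by pmap_clear_modify, pmap_clear_reference
 * and pmap_copy_on_write) walks the same list to set or clear a bit in
 * every mapping of the page.
 */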
/*
 *      pmap_is_modified:
 *
 *      Return whether or not the specified physical page is modified
 *      by any physical maps.
 */

boolean_t
pmap_is_modified(pa)
        vm_offset_t     pa;
{
#ifdef DEBUG
        if (pmapdebug & PDB_FOLLOW) {
                boolean_t rv = pmap_testbit(pa, PG_M);
                printf("pmap_is_modified(%x) -> %c", pa, "FT"[rv]);
                return(rv);
        }
#endif
        return(pmap_testbit(pa, PG_M));
}

vm_offset_t
pmap_phys_address(ppn)
        int ppn;
{
        return(i386_ptob(ppn));
}

/*
 * Miscellaneous support routines follow
 */

static
i386_protection_init()
{
        register int *kp, prot;

        kp = protection_codes;
        for (prot = 0; prot < 8; prot++) {
                switch (prot) {
                case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
                        *kp++ = 0;
                        break;
                case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
                case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
                case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
                        *kp++ = PG_RO;
                        break;
                case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
                case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
                case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
                case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
                        *kp++ = PG_RW;
                        break;
                }
        }
}

static
boolean_t
pmap_testbit(pa, bit)
        register vm_offset_t pa;
        int bit;
{
        register pv_entry_t pv;
        register int *pte, ix;
        int s;

        if (pa < vm_first_phys || pa >= vm_last_phys)
                return(FALSE);

        pv = pa_to_pvh(pa);
        s = splimp();
        /*
         * Check saved info first
         */
        if (pmap_attributes[pa_index(pa)] & bit) {
                splx(s);
                return(TRUE);
        }
        /*
         * Not found, check current mappings returning
         * immediately if found.
         */
        if (pv->pv_pmap != PMAP_NULL) {
                for (; pv; pv = pv->pv_next) {
                        pte = (int *) pmap_pte(pv->pv_pmap, pv->pv_va);
                        ix = 0;
                        do {
                                if (*pte++ & bit) {
                                        splx(s);
                                        return(TRUE);
                                }
                        } while (++ix != i386pagesperpage);
                }
        }
        splx(s);
        return(FALSE);
}

static
pmap_changebit(pa, bit, setem)
        register vm_offset_t pa;
        int bit;
        boolean_t setem;
{
        register pv_entry_t pv;
        register int *pte, npte, ix;
        vm_offset_t va;
        int s;
        boolean_t firstpage = TRUE;

#ifdef DEBUG
        if (pmapdebug & PDB_BITS)
                printf("pmap_changebit(%x, %x, %s)",
                       pa, bit, setem ? "set" : "clear");
#endif
        if (pa < vm_first_phys || pa >= vm_last_phys)
                return;

        pv = pa_to_pvh(pa);
        s = splimp();
        /*
         * Clear saved attributes (modify, reference)
         */
        if (!setem)
                pmap_attributes[pa_index(pa)] &= ~bit;
        /*
         * Loop over all current mappings setting/clearing as appropriate
         * If setting RO do we need to clear the VAC?
         */
        if (pv->pv_pmap != PMAP_NULL) {
#ifdef DEBUG
                int toflush = 0;
#endif
                for (; pv; pv = pv->pv_next) {
#ifdef DEBUG
                        toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1;
#endif
                        va = pv->pv_va;
                        pte = (int *) pmap_pte(pv->pv_pmap, va);
                        ix = 0;
                        do {
                                if (setem)
                                        npte = *pte | bit;
                                else
                                        npte = *pte & ~bit;
                                if (*pte != npte) {
                                        *pte = npte;
                                        /*TBIS(va);*/
                                }
                                va += I386_PAGE_SIZE;
                                pte++;
                        } while (++ix != i386pagesperpage);

                        if (pv->pv_pmap == u.u_procp->p_map->pmap)
                                pmap_activate(pv->pv_pmap, (struct pcb *)u.u_procp->p_addr);
                }
#ifdef somethinglikethis
                if (setem && bit == PG_RO && (pmapvacflush & PVF_PROTECT)) {
                        if ((pmapvacflush & PVF_TOTAL) || toflush == 3)
                                DCIA();
                        else if (toflush == 2)
                                DCIS();
                        else
                                DCIU();
                }
#endif
        }
        splx(s);
}

#ifdef DEBUG
pmap_pvdump(pa)
        vm_offset_t pa;
{
        register pv_entry_t pv;

        printf("pa %x", pa);
        for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next) {
                printf(" -> pmap %x, va %x, flags %x",
                       pv->pv_pmap, pv->pv_va, pv->pv_flags);
                pads(pv->pv_pmap);
        }
        printf(" ");
}

#ifdef notyet
pmap_check_wiring(str, va)
        char *str;
        vm_offset_t va;
{
        vm_map_entry_t entry;
        register int count, *pte;

        va = trunc_page(va);
        if (!pmap_pde_v(pmap_pde(kernel_pmap, va)) ||
            !pmap_pte_v(pmap_pte(kernel_pmap, va)))
                return;

        if (!vm_map_lookup_entry(pt_map, va, &entry)) {
                pg("wired_check: entry for %x not found\n", va);
                return;
        }
        count = 0;
        for (pte = (int *)va; pte < (int *)(va+PAGE_SIZE); pte++)
                if (*pte)
                        count++;
        if (entry->wired_count != count)
                pg("*%s*: %x: w%d/a%d\n",
                   str, va, entry->wired_count, count);
}
#endif

/* print address space of pmap */
pads(pm)
        pmap_t pm;
{
        unsigned va, i, j;
        struct pte *ptep;

        if (pm == kernel_pmap)
                return;
        for (i = 0; i < 1024; i++)
                if (pm->pm_pdir[i].pd_v)
                        for (j = 0; j < 1024; j++) {
                                va = (i << 22) + (j << 12);
                                if (pm == kernel_pmap && va < 0xfe000000)
                                        continue;
                                if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
                                        continue;
                                ptep = pmap_pte(pm, va);
                                if (pmap_pte_v(ptep))
                                        printf("%x:%x ", va, *(int *)ptep);
                        }
}
#endif