/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)pmap.c	7.7 (Berkeley) 05/12/91
 */

/*
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves. This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 *	Derived from: hp300/@(#)pmap.c	7.1 (Berkeley) 12/5/90
 */

/*
 *	Reno i386 version, from Mike Hibler's hp300 version.
 */

/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidate or reduced protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */

#include "param.h"
#include "proc.h"
#include "malloc.h"
#include "user.h"

#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "vm/vm_page.h"
/*#include "vm/vm_pageout.h"*/

/*#include "machine/isa.h"*/
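/*
 * Editorial sketch (not from the original source): the recursive map
 * described above installs the page directory as one of its own page
 * tables, so every PTE of the current address space appears in a
 * single 4MB window of kernel VA and can be reached by plain indexing.
 * Assuming the usual pmap.h names (PTmap for the window base, PTDPTDI
 * for the recursive directory slot), a PTE lookup reduces to:
 *
 *	pte = &PTmap[i386_btop(va)];		(see vtopte() in pmap.h)
 *
 * No software walk of the directory is needed; the MMU performs it as
 * part of the ordinary translation of the PTmap window.
 */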
/*
 * Allocate various and sundry SYSMAPs used in the days of old VM
 * and not yet converted.  XXX.
 */
#define BSDVM_COMPAT	1

#ifdef DEBUG
struct {
	int kernel;	/* entering kernel mapping */
	int user;	/* entering user mapping */
	int ptpneeded;	/* needed to allocate a PT page */
	int pwchange;	/* no mapping change, just wiring or protection */
	int wchange;	/* no mapping change, just wiring */
	int mchange;	/* was mapped but mapping to different page */
	int managed;	/* a managed page */
	int firstpv;	/* first mapping for this PA */
	int secondpv;	/* second mapping for this PA */
	int ci;		/* cache inhibited */
	int unmanaged;	/* not a managed page */
	int flushes;	/* cache flushes */
} enter_stats;
struct {
	int calls;
	int removes;
	int pvfirst;
	int pvsearch;
	int ptinvalid;
	int uflushes;
	int sflushes;
} remove_stats;

int debugmap = 0;
int pmapdebug = 0 /* 0xffff */;
#define PDB_FOLLOW	0x0001
#define PDB_INIT	0x0002
#define PDB_ENTER	0x0004
#define PDB_REMOVE	0x0008
#define PDB_CREATE	0x0010
#define PDB_PTPAGE	0x0020
#define PDB_CACHE	0x0040
#define PDB_BITS	0x0080
#define PDB_COLLECT	0x0100
#define PDB_PROTECT	0x0200
#define PDB_PDRTAB	0x0400
#define PDB_PARANOIA	0x2000
#define PDB_WIRING	0x4000
#define PDB_PVDUMP	0x8000

int pmapvacflush = 0;
#define PVF_ENTER	0x01
#define PVF_REMOVE	0x02
#define PVF_PROTECT	0x04
#define PVF_TOTAL	0x80
#endif

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define pmap_pde(m, v) (&((m)->pm_pdir[((vm_offset_t)(v) >> PD_SHIFT)&1023]))

#define pmap_pte_pa(pte)	(*(int *)(pte) & PG_FRAME)

#define pmap_pde_v(pte)		((pte)->pd_v)
#define pmap_pte_w(pte)		((pte)->pg_w)
/* #define pmap_pte_ci(pte)	((pte)->pg_ci) */
#define pmap_pte_m(pte)		((pte)->pg_m)
#define pmap_pte_u(pte)		((pte)->pg_u)
#define pmap_pte_v(pte)		((pte)->pg_v)
#define pmap_pte_set_w(pte, v)		((pte)->pg_w = (v))
#define pmap_pte_set_prot(pte, v)	((pte)->pg_prot = (v))

/*
 * Given a map and a machine independent protection code,
 * convert to an i386 protection code.
 */
#define pte_prot(m, p)	(protection_codes[p])
int protection_codes[8];

struct pmap	kernel_pmap_store;
pmap_t		kernel_pmap;

vm_offset_t	avail_start;	/* PA of first available physical page */
vm_offset_t	avail_end;	/* PA of last available physical page */
vm_size_t	mem_size;	/* memory size in bytes */
vm_offset_t	virtual_avail;	/* VA of first avail page (after kernel bss)*/
vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
vm_offset_t	vm_first_phys;	/* PA of first managed page */
vm_offset_t	vm_last_phys;	/* PA just past last managed page */
int		i386pagesperpage;	/* PAGE_SIZE / I386_PAGE_SIZE */
boolean_t	pmap_initialized = FALSE;	/* Has pmap_init completed? */
char		*pmap_attributes;	/* reference and modify bits */

boolean_t	pmap_testbit();
void		pmap_clear_modify();

#if BSDVM_COMPAT
#include "msgbuf.h"

/*
 * All those kernel PT submaps that BSD is so fond of
 */
struct pte	*CMAP1, *CMAP2, *mmap;
caddr_t		CADDR1, CADDR2, vmmap;
struct pte	*msgbufmap;
struct msgbuf	*msgbufp;
#endif
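/*
 * Editorial worked example (not from the original source): with 4KB
 * pages and PD_SHIFT == 22, pmap_pde() above selects one of the 1024
 * directory slots using the top ten bits of the VA.  For
 * va == 0xfe45a123:
 *
 *	(0xfe45a123 >> 22) & 1023 == 1017
 *
 * and pmap_pte_pa() masks a PTE down to its page frame address with
 * PG_FRAME, discarding the protection and status bits.
 */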
/*
 * Bootstrap the system enough to run with virtual memory.
 * Map the kernel's code and data, and allocate the system page table.
 *
 * On the I386 this is called after mapping has already been enabled
 * and just syncs the pmap module with what has already been done.
 * [We can't call it easily with mapping off since the kernel is not
 * mapped with PA == VA, hence we would have to relocate every address
 * from the linked base (virtual) address 0xFE000000 to the actual
 * (physical) address starting relative to 0]
 */
struct pte *pmap_pte();

extern vm_offset_t atdevbase;
void
pmap_bootstrap(firstaddr, loadaddr)
	vm_offset_t firstaddr;
	vm_offset_t loadaddr;
{
#if BSDVM_COMPAT
	vm_offset_t va;
	struct pte *pte;
#endif
	extern vm_offset_t maxmem, physmem;
	extern int IdlePTD;

	firstaddr = 0x100000;	/*XXX basemem completely fucked (again) */
	avail_start = firstaddr;
	avail_end = maxmem << PG_SHIFT;

	/* XXX: allow for msgbuf */
	avail_end -= i386_round_page(sizeof(struct msgbuf));

	mem_size = physmem << PG_SHIFT;
	virtual_avail = atdevbase + 0x100000 - 0xa0000 + 10*NBPG;
	virtual_end = VM_MAX_KERNEL_ADDRESS;
	i386pagesperpage = PAGE_SIZE / I386_PAGE_SIZE;

	/*
	 * Initialize protection array.
	 */
	i386_protection_init();

	/*
	 * The kernel's pmap is statically allocated so we don't
	 * have to use pmap_create, which is unlikely to work
	 * correctly at this part of the boot sequence.
	 */
	kernel_pmap = &kernel_pmap_store;

#ifdef notdef
	/*
	 * Create Kernel page directory table and page maps.
	 * [ currently done in locore.  i have wild and crazy ideas -wfj ]
	 */
	bzero(firstaddr, 4*NBPG);
	kernel_pmap->pm_pdir = firstaddr + VM_MIN_KERNEL_ADDRESS;
	kernel_pmap->pm_ptab = firstaddr + VM_MIN_KERNEL_ADDRESS + NBPG;

	firstaddr += NBPG;
	for (x = i386_btod(VM_MIN_KERNEL_ADDRESS);
	     x < i386_btod(VM_MIN_KERNEL_ADDRESS)+3; x++) {
		struct pde *pde;
		pde = kernel_pmap->pm_pdir + x;
		*(int *)pde = firstaddr + x*NBPG | PG_V | PG_KW;
	}
#else
	kernel_pmap->pm_pdir = (pd_entry_t *)(0xfe000000 + IdlePTD);
#endif

	simple_lock_init(&kernel_pmap->pm_lock);
	kernel_pmap->pm_count = 1;

#if BSDVM_COMPAT
	/*
	 * Allocate all the submaps we need
	 */
#define	SYSMAP(c, p, v, n)	\
	v = (c)va; va += ((n)*I386_PAGE_SIZE); p = pte; pte += (n);

	va = virtual_avail;
	pte = pmap_pte(kernel_pmap, va);

	SYSMAP(caddr_t, CMAP1, CADDR1, 1)
	SYSMAP(caddr_t, CMAP2, CADDR2, 1)
	SYSMAP(caddr_t, mmap, vmmap, 1)
	SYSMAP(struct msgbuf *, msgbufmap, msgbufp, 1)
	virtual_avail = va;
#endif

	/**(int *)PTD = 0;
	load_cr3(rcr3());*/
}
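/*
 * Editorial note (not from the original source): SYSMAP above is just
 * a cursor over a reserved run of kernel VA and its PTEs.  The first
 * invocation expands, roughly, to:
 *
 *	CADDR1 = (caddr_t)va;
 *	va += 1*I386_PAGE_SIZE;
 *	CMAP1 = pte;
 *	pte += 1;
 *
 * leaving CMAP1 pointing at the PTE that maps the window CADDR1, so a
 * physical page can later be mapped there by storing into *CMAP1.
 */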
/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 */
void
pmap_init(phys_start, phys_end)
	vm_offset_t phys_start, phys_end;
{
	vm_offset_t addr, addr2;
	vm_size_t npg, s;
	int rv;
	extern int KPTphys;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
#endif
	/*
	 * Now that kernel map has been allocated, we can mark as
	 * unavailable regions which we have mapped in locore.
	 */
	addr = atdevbase;
	(void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
			   &addr, (0x100000-0xa0000), FALSE);

	addr = (vm_offset_t) 0xfe000000+KPTphys/* *NBPG */;
	vm_object_reference(kernel_object);
	(void) vm_map_find(kernel_map, kernel_object, addr,
			   &addr, 2*NBPG, FALSE);

	/*
	 * Allocate memory for random pmap data structures.  Includes the
	 * pv_head_table and pmap_attributes.
	 */
	npg = atop(phys_end - phys_start);
	s = (vm_size_t) (sizeof(struct pv_entry) * npg + npg);
	s = round_page(s);
	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
	pv_table = (pv_entry_t) addr;
	addr += sizeof(struct pv_entry) * npg;
	pmap_attributes = (char *) addr;
#ifdef DEBUG
	if (pmapdebug & PDB_INIT)
		printf("pmap_init: %x bytes (%x pgs): tbl %x attr %x\n",
		       s, npg, pv_table, pmap_attributes);
#endif

	/*
	 * Now it is safe to enable pv_table recording.
	 */
	vm_first_phys = phys_start;
	vm_last_phys = phys_end;
	pmap_initialized = TRUE;
}

/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * For now, VM is already on, we only need to map the
 * specified memory.
 */
vm_offset_t
pmap_map(virt, start, end, prot)
	vm_offset_t virt;
	vm_offset_t start;
	vm_offset_t end;
	int prot;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
#endif
	while (start < end) {
		pmap_enter(kernel_pmap, virt, start, prot, FALSE);
		virt += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	return(virt);
}

/*
 * Create and return a physical map.
 *
 * If the size specified for the map
 * is zero, the map is an actual physical
 * map, and may be referenced by the
 * hardware.
 *
 * If the size specified is non-zero,
 * the map will be used in software only, and
 * is bounded by that size.
 *
 * [ just allocate a ptd and mark it uninitialized -- should we track
 *   with a table which process has which ptd? -wfj ]
 */
pmap_t
pmap_create(size)
	vm_size_t size;
{
	register pmap_t pmap;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_create(%x)\n", size);
#endif
	/*
	 * Software use map does not need a pmap
	 */
	if (size)
		return(NULL);

	/* XXX: is it ok to wait here? */
	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
#ifdef notifwewait
	if (pmap == NULL)
		panic("pmap_create: cannot allocate a pmap");
#endif
	bzero(pmap, sizeof(*pmap));
	pmap_pinit(pmap);
	return (pmap);
}
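/*
 * Editorial usage sketch (hypothetical caller, not from the original
 * source): pmap_map returns the first VA past the run it mapped, so
 * consecutive physical ranges can be mapped back to back:
 *
 *	va = pmap_map(va, 0xa0000, 0x100000, VM_PROT_READ|VM_PROT_WRITE);
 *	va = pmap_map(va, rom_start, rom_end, VM_PROT_READ);
 *
 * (rom_start/rom_end are placeholder names, not symbols in this file.)
 */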
/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
void
pmap_pinit(pmap)
	register struct pmap *pmap;
{

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		pg("pmap_pinit(%x)\n", pmap);
#endif

	/*
	 * No need to allocate page table space yet but we do need a
	 * valid page directory table.
	 */
	pmap->pm_pdir = (pd_entry_t *) kmem_alloc(kernel_map, NBPG);

	/* wire in kernel global address entries */
	bcopy(PTD+KPTDI_FIRST, pmap->pm_pdir+KPTDI_FIRST,
	      (KPTDI_LAST-KPTDI_FIRST+1)*4);

	/* install self-referential address mapping entry */
	*(int *)(pmap->pm_pdir+PTDPTDI) =
	    (int)pmap_extract(kernel_pmap, pmap->pm_pdir) | PG_V | PG_URKW;

	pmap->pm_count = 1;
	simple_lock_init(&pmap->pm_lock);
}

/*
 * Retire the given physical map from service.
 * Should only be called if the map contains
 * no valid mappings.
 */
void
pmap_destroy(pmap)
	register pmap_t pmap;
{
	int count;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_destroy(%x)\n", pmap);
#endif
	if (pmap == NULL)
		return;

	simple_lock(&pmap->pm_lock);
	count = --pmap->pm_count;
	simple_unlock(&pmap->pm_lock);
	if (count == 0) {
		pmap_release(pmap);
		free((caddr_t)pmap, M_VMPMAP);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap)
	register struct pmap *pmap;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		pg("pmap_release(%x)\n", pmap);
#endif
#ifdef notdef /* DIAGNOSTIC */
	/* count would be 0 from pmap_destroy... */
	simple_lock(&pmap->pm_lock);
	if (pmap->pm_count != 1)
		panic("pmap_release count");
#endif
	kmem_free(kernel_map, (vm_offset_t)pmap->pm_pdir, NBPG);
}

/*
 * Add a reference to the specified pmap.
 */
void
pmap_reference(pmap)
	pmap_t pmap;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_reference(%x)", pmap);
#endif
	if (pmap != NULL) {
		simple_lock(&pmap->pm_lock);
		pmap->pm_count++;
		simple_unlock(&pmap->pm_lock);
	}
}
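/*
 * Editorial note (not from the original source): the self-referential
 * entry installed by pmap_pinit above is what arms the recursive map
 * for each new process.  Written out, that store is roughly:
 *
 *	pa = pmap_extract(kernel_pmap, (vm_offset_t)pmap->pm_pdir);
 *	pmap->pm_pdir[PTDPTDI] = pa | PG_V | PG_URKW;	(sketch only)
 *
 * Directory slot PTDPTDI points back at the directory itself, so once
 * this pmap is loaded into %cr3 its page tables appear in the PTmap
 * window and the directory appears at PTD.
 */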
/*
 * Remove the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly
 * rounded to the page size.
 */
void
pmap_remove(pmap, sva, eva)
	register struct pmap *pmap;
	vm_offset_t sva, eva;
{
	register vm_offset_t pa, va;
	register pt_entry_t *pte;
	register pv_entry_t pv, npv;
	register int ix;
	pmap_t ptpmap;
	int *pde, s, bits;
	boolean_t firstpage = TRUE;
	boolean_t flushcache = FALSE;
#ifdef DEBUG
	pt_entry_t opte;

	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
		pg("pmap_remove(%x, %x, %x)", pmap, sva, eva);
#endif

	if (pmap == NULL)
		return;

#ifdef DEBUG
	remove_stats.calls++;
#endif
	for (va = sva; va < eva; va += PAGE_SIZE) {
		/*
		 * Weed out invalid mappings.
		 * Note: we assume that the page directory table is
		 * always allocated, and in kernel virtual.
		 */
		if (!pmap_pde_v(pmap_pde(pmap, va)))
			continue;

		pte = pmap_pte(pmap, va);
		if (pte == 0)
			continue;
		pa = pmap_pte_pa(pte);
		if (pa == 0)
			continue;
#ifdef DEBUG
		opte = *pte;
		remove_stats.removes++;
#endif
		/*
		 * Update statistics
		 */
		if (pmap_pte_w(pte))
			pmap->pm_stats.wired_count--;
		pmap->pm_stats.resident_count--;

		/*
		 * Invalidate the PTEs.
		 * XXX: should cluster them up and invalidate as many
		 * as possible at once.
		 */
#ifdef DEBUG
		if (pmapdebug & PDB_REMOVE)
			printf("remove: inv %x ptes at %x(%x) ",
			       i386pagesperpage, pte, *(int *)pte);
#endif
		bits = ix = 0;
		do {
			bits |= *(int *)pte & (PG_U|PG_M);
			*(int *)pte++ = 0;
			/*TBIS(va + ix * I386_PAGE_SIZE);*/
		} while (++ix != i386pagesperpage);
		if (pmap == &curproc->p_vmspace->vm_pmap)
			pmap_activate(pmap, (struct pcb *)curproc->p_addr);
		/* are we current address space or kernel? */
		/*if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
		    || pmap == kernel_pmap)
			load_cr3(curpcb->pcb_ptd);*/
		tlbflush();

#ifdef needednotdone
reduce wiring count on page table pages as references drop
#endif

		/*
		 * Remove from the PV table (raise IPL since we
		 * may be called at interrupt time).
		 */
		if (pa < vm_first_phys || pa >= vm_last_phys)
			continue;
		pv = pa_to_pvh(pa);
		s = splimp();
		/*
		 * If it is the first entry on the list, it is actually
		 * in the header and we must copy the following entry up
		 * to the header.  Otherwise we must search the list for
		 * the entry.  In either case we free the now unused entry.
		 */
		if (pmap == pv->pv_pmap && va == pv->pv_va) {
			npv = pv->pv_next;
			if (npv) {
				*pv = *npv;
				free((caddr_t)npv, M_VMPVENT);
			} else
				pv->pv_pmap = NULL;
#ifdef DEBUG
			remove_stats.pvfirst++;
#endif
		} else {
			for (npv = pv->pv_next; npv; npv = npv->pv_next) {
#ifdef DEBUG
				remove_stats.pvsearch++;
#endif
				if (pmap == npv->pv_pmap && va == npv->pv_va)
					break;
				pv = npv;
			}
#ifdef DEBUG
			if (npv == NULL)
				panic("pmap_remove: PA not in pv_tab");
#endif
			pv->pv_next = npv->pv_next;
			free((caddr_t)npv, M_VMPVENT);
			pv = pa_to_pvh(pa);
		}

#ifdef notdef
[tally number of pagetable pages, if sharing of ptpages adjust here]
#endif
		/*
		 * Update saved attributes for managed page
		 */
		pmap_attributes[pa_index(pa)] |= bits;
		splx(s);
	}
#ifdef notdef
[cache and tlb flushing, if needed]
#endif
}

/*
 * Routine:	pmap_remove_all
 * Function:
 *	Removes this physical page from
 *	all physical maps in which it resides.
 *	Reflects back modify bits to the pager.
 */
void
pmap_remove_all(pa)
	vm_offset_t pa;
{
	register pv_entry_t pv;
	int s;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
		printf("pmap_remove_all(%x)", pa);
	/*pmap_pvdump(pa);*/
#endif
	/*
	 * Not one of ours
	 */
	if (pa < vm_first_phys || pa >= vm_last_phys)
		return;

	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * Do it the easy way for now
	 */
	while (pv->pv_pmap != NULL) {
#ifdef DEBUG
		if (!pmap_pde_v(pmap_pde(pv->pv_pmap, pv->pv_va)) ||
		    pmap_pte_pa(pmap_pte(pv->pv_pmap, pv->pv_va)) != pa)
			panic("pmap_remove_all: bad mapping");
#endif
		pmap_remove(pv->pv_pmap, pv->pv_va, pv->pv_va + PAGE_SIZE);
	}
	splx(s);
}

/*
 * Routine:	pmap_copy_on_write
 * Function:
 *	Remove write privileges from all
 *	physical maps for this physical page.
 */
void
pmap_copy_on_write(pa)
	vm_offset_t pa;
{
#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
		printf("pmap_copy_on_write(%x)", pa);
#endif
	pmap_changebit(pa, PG_RO, TRUE);
}
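/*
 * Editorial sketch (not from the original source): the pv list keeps
 * its first element inline in the pa_to_pvh() header, so unlinking the
 * head entry is a structure copy rather than a pointer splice.  In
 * outline, pmap_remove above does:
 *
 *	if (the header matches) {
 *		if (pv->pv_next)
 *			*pv = *pv->pv_next;	(copy successor up)
 *		else
 *			pv->pv_pmap = NULL;	(list now empty)
 *	} else
 *		unlink the matching npv and free it;
 *
 * This is also why pmap_remove_all can simply loop while
 * pv->pv_pmap != NULL: each removal collapses the list toward the
 * header.
 */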
/*
 * Set the physical protection on the
 * specified range of this map as requested.
 */
void
pmap_protect(pmap, sva, eva, prot)
	register pmap_t pmap;
	vm_offset_t sva, eva;
	vm_prot_t prot;
{
	register pt_entry_t *pte;
	register vm_offset_t va;
	register int ix;
	int i386prot;
	boolean_t firstpage = TRUE;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
		printf("pmap_protect(%x, %x, %x, %x)", pmap, sva, eva, prot);
#endif
	if (pmap == NULL)
		return;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}
	if (prot & VM_PROT_WRITE)
		return;

	pte = pmap_pte(pmap, sva);
	if (!pte)
		return;
	for (va = sva; va < eva; va += PAGE_SIZE) {
		/*
		 * Page table page is not allocated.
		 * Skip it, we don't want to force allocation
		 * of unnecessary PTE pages just to set the protection.
		 */
		if (!pmap_pde_v(pmap_pde(pmap, va))) {
			/* XXX: avoid address wrap around */
			if (va >= i386_trunc_pdr((vm_offset_t)-1))
				break;
			va = i386_round_pdr(va + PAGE_SIZE) - PAGE_SIZE;
			pte = pmap_pte(pmap, va);
			pte += i386pagesperpage;
			continue;
		}
		if (!pte)
			return;
		/*
		 * Page not valid.  Again, skip it.
		 * Should we do this?  Or set protection anyway?
		 */
		if (!pmap_pte_v(pte)) {
			pte += i386pagesperpage;
			continue;
		}
		ix = 0;
		i386prot = pte_prot(pmap, prot);
		if (va < UPT_MAX_ADDRESS)
			i386prot |= 2 /*PG_u*/;
		do {
			/* clear VAC here if PG_RO? */
			pmap_pte_set_prot(pte++, i386prot);
			/*TBIS(va + ix * I386_PAGE_SIZE);*/
		} while (++ix != i386pagesperpage);
	}
	if (pmap == &curproc->p_vmspace->vm_pmap)
		pmap_activate(pmap, (struct pcb *)curproc->p_addr);
}
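/*
 * Editorial worked example (not from the original source): the skip
 * over an unallocated PT page above rounds va up to the next 4MB
 * directory boundary, minus PAGE_SIZE so the loop's own
 * "va += PAGE_SIZE" lands exactly on the boundary.  With 4KB pages,
 * if va == 0x00423000:
 *
 *	i386_round_pdr(0x00423000 + 0x1000) == 0x00800000
 *	0x00800000 - 0x1000 == 0x007ff000	(next iteration: 0x00800000)
 *
 * so no PTEs inside the invalid 4MB slot are touched.
 */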
/*
 * Insert the given physical page (p) at
 * the specified virtual address (v) in the
 * target physical map with the protection requested.
 *
 * If specified, the page will be wired down, meaning
 * that the related pte can not be reclaimed.
 *
 * NB:  This is the only routine which MAY NOT lazy-evaluate
 * or lose information.  That is, this routine must actually
 * insert this page into the given map NOW.
 */
void
pmap_enter(pmap, va, pa, prot, wired)
	register pmap_t pmap;
	vm_offset_t va;
	register vm_offset_t pa;
	vm_prot_t prot;
	boolean_t wired;
{
	register pt_entry_t *pte;
	register int npte, ix;
	vm_offset_t opa;
	boolean_t cacheable = TRUE;
	boolean_t checkpv = TRUE;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
		printf("pmap_enter(%x, %x, %x, %x, %x)",
		       pmap, va, pa, prot, wired);
#endif
	if (pmap == NULL)
		return;

	if (va > VM_MAX_KERNEL_ADDRESS)
		panic("pmap_enter: toobig");
	/* also, should not muck with PTD va! */

#ifdef DEBUG
	if (pmap == kernel_pmap)
		enter_stats.kernel++;
	else
		enter_stats.user++;
#endif

	/*
	 * Page Directory table entry not valid, we need a new PT page
	 */
	if (!pmap_pde_v(pmap_pde(pmap, va))) {
		pg("ptdi %x", pmap->pm_pdir[PTDPTDI]);
	}

	pte = pmap_pte(pmap, va);
	opa = pmap_pte_pa(pte);
#ifdef DEBUG
	if (pmapdebug & PDB_ENTER)
		printf("enter: pte %x, *pte %x ", pte, *(int *)pte);
#endif

	/*
	 * Mapping has not changed, must be protection or wiring change.
	 */
	if (opa == pa) {
#ifdef DEBUG
		enter_stats.pwchange++;
#endif
		/*
		 * Wiring change, just update stats.
		 * We don't worry about wiring PT pages as they remain
		 * resident as long as there are valid mappings in them.
		 * Hence, if a user page is wired, the PT page will be also.
		 */
		if ((wired && !pmap_pte_w(pte)) || (!wired && pmap_pte_w(pte))) {
#ifdef DEBUG
			if (pmapdebug & PDB_ENTER)
				pg("enter: wiring change -> %x ", wired);
#endif
			if (wired)
				pmap->pm_stats.wired_count++;
			else
				pmap->pm_stats.wired_count--;
#ifdef DEBUG
			enter_stats.wchange++;
#endif
		}
		goto validate;
	}

	/*
	 * Mapping has changed, invalidate old range and fall through to
	 * handle validating new mapping.
	 */
	if (opa) {
#ifdef DEBUG
		if (pmapdebug & PDB_ENTER)
			printf("enter: removing old mapping %x pa %x ", va, opa);
#endif
		pmap_remove(pmap, va, va + PAGE_SIZE);
#ifdef DEBUG
		enter_stats.mchange++;
#endif
	}

	/*
	 * Enter on the PV list if part of our managed memory
	 * Note that we raise IPL while manipulating pv_table
	 * since pmap_enter can be called at interrupt time.
	 */
	if (pa >= vm_first_phys && pa < vm_last_phys) {
		register pv_entry_t pv, npv;
		int s;

#ifdef DEBUG
		enter_stats.managed++;
#endif
		pv = pa_to_pvh(pa);
		s = splimp();
#ifdef DEBUG
		if (pmapdebug & PDB_ENTER)
			printf("enter: pv at %x: %x/%x/%x ",
			       pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
#endif
		/*
		 * No entries yet, use header as the first entry
		 */
		if (pv->pv_pmap == NULL) {
#ifdef DEBUG
			enter_stats.firstpv++;
#endif
			pv->pv_va = va;
			pv->pv_pmap = pmap;
			pv->pv_next = NULL;
			pv->pv_flags = 0;
		}
		/*
		 * There is at least one other VA mapping this page.
		 * Place this entry after the header.
		 */
		else {
			/*printf("second time: ");*/
#ifdef DEBUG
			for (npv = pv; npv; npv = npv->pv_next)
				if (pmap == npv->pv_pmap && va == npv->pv_va)
					panic("pmap_enter: already in pv_tab");
#endif
			npv = (pv_entry_t)
				malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
			npv->pv_va = va;
			npv->pv_pmap = pmap;
			npv->pv_next = pv->pv_next;
			pv->pv_next = npv;
#ifdef DEBUG
			if (!npv->pv_next)
				enter_stats.secondpv++;
#endif
		}
		/* drop IPL on both paths, not just after the list insert */
		splx(s);
	}
	/*
	 * Assumption: if it is not part of our managed memory
	 * then it must be device memory which may be volatile.
	 */
	else if (pmap_initialized) {
		checkpv = cacheable = FALSE;
#ifdef DEBUG
		enter_stats.unmanaged++;
#endif
	}

	/*
	 * Increment counters
	 */
	pmap->pm_stats.resident_count++;
	if (wired)
		pmap->pm_stats.wired_count++;

validate:
	/*
	 * Now validate mapping with desired protection/wiring.
	 * Assume uniform modified and referenced status for all
	 * I386 pages in a MACH page.
	 */
	npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
	npte |= (*(int *)pte & (PG_M|PG_U));
	if (wired)
		npte |= PG_W;
	if (va < UPT_MIN_ADDRESS)
		npte |= PG_u;
	else if (va < UPT_MAX_ADDRESS)
		npte |= PG_u | PG_RW;
#ifdef DEBUG
	if (pmapdebug & PDB_ENTER)
		printf("enter: new pte value %x ", npte);
#endif
	ix = 0;
	do {
		*(int *)pte++ = npte;
		/*TBIS(va);*/
		npte += I386_PAGE_SIZE;
		va += I386_PAGE_SIZE;
	} while (++ix != i386pagesperpage);
	pte--;
#ifdef DEBUGx
cache, tlb flushes
#endif
	/*pads(pmap);*/
	/*load_cr3(((struct pcb *)curproc->p_addr)->pcb_ptd);*/
	tlbflush();
}
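/*
 * Editorial worked example (not from the original source): assuming
 * the usual i386 PTE bit definitions, the validate step above builds
 * a wired, writable mapping of physical page 0x123000 as:
 *
 *	npte = (0x123000 & PG_FRAME)		page frame bits
 *	     | pte_prot(pmap, prot)		PG_RW here
 *	     | PG_V | PG_W			valid and wired
 *	     | (*(int *)pte & (PG_M|PG_U));	keep mod/ref history
 *
 * Carrying PG_M/PG_U forward keeps pmap_testbit answers stable across
 * a protection or wiring change.
 */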
/*
 * pmap_page_protect:
 *
 * Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(phys, prot)
	vm_offset_t phys;
	vm_prot_t prot;
{
	switch (prot) {
	case VM_PROT_READ:
	case VM_PROT_READ|VM_PROT_EXECUTE:
		pmap_copy_on_write(phys);
		break;
	case VM_PROT_ALL:
		break;
	default:
		pmap_remove_all(phys);
		break;
	}
}

/*
 * Routine:	pmap_change_wiring
 * Function:	Change the wiring attribute for a map/virtual-address
 *		pair.
 * In/out conditions:
 *		The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap, va, wired)
	register pmap_t pmap;
	vm_offset_t va;
	boolean_t wired;
{
	register pt_entry_t *pte;
	register int ix;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_change_wiring(%x, %x, %x)", pmap, va, wired);
#endif
	if (pmap == NULL)
		return;

	pte = pmap_pte(pmap, va);
#ifdef DEBUG
	/*
	 * Page table page is not allocated.
	 * Should this ever happen?  Ignore it for now,
	 * we don't want to force allocation of unnecessary PTE pages.
	 */
	if (!pmap_pde_v(pmap_pde(pmap, va))) {
		if (pmapdebug & PDB_PARANOIA)
			pg("pmap_change_wiring: invalid PDE for %x ", va);
		return;
	}
	/*
	 * Page not valid.  Should this ever happen?
	 * Just continue and change wiring anyway.
	 */
	if (!pmap_pte_v(pte)) {
		if (pmapdebug & PDB_PARANOIA)
			pg("pmap_change_wiring: invalid PTE for %x ", va);
	}
#endif
	if ((wired && !pmap_pte_w(pte)) || (!wired && pmap_pte_w(pte))) {
		if (wired)
			pmap->pm_stats.wired_count++;
		else
			pmap->pm_stats.wired_count--;
	}
	/*
	 * Wiring is not a hardware characteristic so there is no need
	 * to invalidate TLB.
	 */
	ix = 0;
	do {
		pmap_pte_set_w(pte++, wired);
	} while (++ix != i386pagesperpage);
}
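/*
 * Editorial note (not from the original source): pmap_page_protect
 * above folds the requested protection to one of three outcomes, so a
 * typical pageout-time sequence is:
 *
 *	pmap_page_protect(pa, VM_PROT_READ);	write-protect all mappings
 *	pmap_page_protect(pa, VM_PROT_NONE);	remove all mappings
 *
 * VM_PROT_ALL is a no-op since no permission is being lowered.
 */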
/*
 * Routine:	pmap_pte
 * Function:
 *	Extract the page table entry associated
 *	with the given map/virtual_address pair.
 * [ what about induced faults -wfj ]
 */
struct pte *
pmap_pte(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{

#ifdef DEBUGx
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_pte(%x, %x) ->\n", pmap, va);
#endif
	if (pmap && pmap_pde_v(pmap_pde(pmap, va))) {

		/* are we current address space or kernel? */
		if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
		    || pmap == kernel_pmap)
			return ((struct pte *) vtopte(va));

		/* otherwise, we are alternate address space */
		else {
			if (pmap->pm_pdir[PTDPTDI].pd_pfnum
			    != APTDpde.pd_pfnum) {
				APTDpde = pmap->pm_pdir[PTDPTDI];
				tlbflush();
			}
			return ((struct pte *) avtopte(va));
		}
	}
	return (0);
}

/*
 * Routine:	pmap_extract
 * Function:
 *	Extract the physical page address associated
 *	with the given map/virtual_address pair.
 */
vm_offset_t
pmap_extract(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{
	register vm_offset_t pa;

#ifdef DEBUGx
	if (pmapdebug & PDB_FOLLOW)
		pg("pmap_extract(%x, %x) -> ", pmap, va);
#endif
	pa = 0;
	if (pmap && pmap_pde_v(pmap_pde(pmap, va))) {
		pa = *(int *) pmap_pte(pmap, va);
	}
	if (pa)
		pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
#ifdef DEBUGx
	if (pmapdebug & PDB_FOLLOW)
		printf("%x\n", pa);
#endif
	return (pa);
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
	pmap_t dst_pmap;
	pmap_t src_pmap;
	vm_offset_t dst_addr;
	vm_size_t len;
	vm_offset_t src_addr;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy(%x, %x, %x, %x, %x)",
		       dst_pmap, src_pmap, dst_addr, len, src_addr);
#endif
}

/*
 * Require that all active physical maps contain no
 * incorrect entries NOW.  [This update includes
 * forcing updates of any address map caching.]
 *
 * Generally used to ensure that a thread about
 * to run will see a semantically correct world.
 */
void
pmap_update()
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_update()");
#endif
	tlbflush();
}

/*
 * Routine:	pmap_collect
 * Function:
 *	Garbage collects the physical map system for
 *	pages which are no longer used.
 *	Success need not be guaranteed -- that is, there
 *	may well be pages which are not referenced, but
 *	others may be collected.
 * Usage:
 *	Called by the pageout daemon when pages are scarce.
 * [ needs to be written -wfj ]
 */
void
pmap_collect(pmap)
	pmap_t pmap;
{
	register vm_offset_t pa;
	register pv_entry_t pv;
	register int *pte;
	vm_offset_t kpa;
	int s;

#ifdef DEBUG
	int *pde;
	int opmapdebug;
	printf("pmap_collect(%x) ", pmap);
#endif
	if (pmap != kernel_pmap)
		return;
}

/* [ macro again?, should I force kstack into user map here? -wfj ] */
void
pmap_activate(pmap, pcbp)
	register pmap_t pmap;
	struct pcb *pcbp;
{
	int x;
#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_PDRTAB))
		pg("pmap_activate(%x, %x) ", pmap, pcbp);
#endif
	PMAP_ACTIVATE(pmap, pcbp);
	/*printf("pde ");
	for(x=0x3f6; x < 0x3fA; x++)
		printf("%x ", pmap->pm_pdir[x]);*/
	/*pads(pmap);*/
	/*pg(" pcb_cr3 %x", pcbp->pcb_cr3);*/
}
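/*
 * Editorial note (not from the original source): pmap_pte above shows
 * the two faces of the recursive map.  If the target pmap is the one
 * loaded in %cr3 (its recursive slot matches PTDpde), its PTEs are
 * visible at vtopte(va) in the normal PTmap window; any other pmap is
 * made visible through the alternate slot:
 *
 *	APTDpde = pmap->pm_pdir[PTDPTDI];	borrow alternate window
 *	tlbflush();				flush stale APTmap entries
 *	pte = avtopte(va);
 *
 * Only one alternate pmap is visible at a time, which suffices on this
 * uniprocessor kernel.
 */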
/*
 * Routine:	pmap_kernel
 * Function:
 *	Returns the physical map handle for the kernel.
 */
pmap_t
pmap_kernel()
{
	return (kernel_pmap);
}

/*
 * pmap_zero_page zeros the specified (machine independent)
 * page by mapping the page into virtual memory and using
 * bzero to clear its contents, one machine dependent page
 * at a time.
 */
pmap_zero_page(phys)
	register vm_offset_t phys;
{
	register int ix;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_zero_page(%x)", phys);
#endif
	phys >>= PG_SHIFT;
	ix = 0;
	do {
		clearseg(phys++);
	} while (++ix != i386pagesperpage);
}

/*
 * pmap_copy_page copies the specified (machine independent)
 * page by mapping the page into virtual memory and using
 * bcopy to copy the page, one machine dependent page at a
 * time.
 */
pmap_copy_page(src, dst)
	register vm_offset_t src, dst;
{
	register int ix;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy_page(%x, %x)", src, dst);
#endif
	src >>= PG_SHIFT;
	dst >>= PG_SHIFT;
	ix = 0;
	do {
		physcopyseg(src++, dst++);
	} while (++ix != i386pagesperpage);
}

/*
 * Routine:	pmap_pageable
 * Function:
 *	Make the specified pages (by pmap, offset)
 *	pageable (or not) as requested.
 *
 *	A page which is not pageable may not take
 *	a fault; therefore, its page table entry
 *	must remain valid for the duration.
 *
 *	This routine is merely advisory; pmap_enter
 *	will specify that these pages are to be wired
 *	down (or not) as appropriate.
 */
pmap_pageable(pmap, sva, eva, pageable)
	pmap_t pmap;
	vm_offset_t sva, eva;
	boolean_t pageable;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_pageable(%x, %x, %x, %x)",
		       pmap, sva, eva, pageable);
#endif
	/*
	 * If we are making a PT page pageable then all valid
	 * mappings must be gone from that page.  Hence it should
	 * be all zeros and there is no need to clean it.
	 * Assumptions:
	 *	- we are called with only one page at a time
	 *	- PT pages have only one pv_table entry
	 */
	if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
		register pv_entry_t pv;
		register vm_offset_t pa;

#ifdef DEBUG
		if ((pmapdebug & (PDB_FOLLOW|PDB_PTPAGE)) == PDB_PTPAGE)
			printf("pmap_pageable(%x, %x, %x, %x)",
			       pmap, sva, eva, pageable);
#endif
		/*if (!pmap_pde_v(pmap_pde(pmap, sva)))
			return;*/
		if (pmap_pte(pmap, sva) == 0)
			return;
		pa = pmap_pte_pa(pmap_pte(pmap, sva));
		if (pa < vm_first_phys || pa >= vm_last_phys)
			return;
		pv = pa_to_pvh(pa);
		/*if (!ispt(pv->pv_va))
			return;*/
#ifdef DEBUG
		if (pv->pv_va != sva || pv->pv_next) {
			pg("pmap_pageable: bad PT page va %x next %x\n",
			   pv->pv_va, pv->pv_next);
			return;
		}
#endif
		/*
		 * Mark it unmodified to avoid pageout
		 */
		pmap_clear_modify(pa);
#ifdef needsomethinglikethis
		if (pmapdebug & PDB_PTPAGE)
			pg("pmap_pageable: PT page %x(%x) unmodified\n",
			   sva, *(int *)pmap_pte(pmap, sva));
		if (pmapdebug & PDB_WIRING)
			pmap_check_wiring("pageable", sva);
#endif
	}
}
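/*
 * Editorial note (not from the original source): pmap_zero_page and
 * pmap_copy_page above work in units of the hardware page, repeating
 * i386pagesperpage times per VM page:
 *
 *	i386pagesperpage = PAGE_SIZE / I386_PAGE_SIZE;	(1 on the i386)
 *
 * Both sizes are 4KB here, so the loop body runs once; the loop shape
 * is inherited from ports where a VM page may span several hardware
 * pages.
 */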
/*
 * Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(pa)
	vm_offset_t pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_modify(%x)", pa);
#endif
	pmap_changebit(pa, PG_M, FALSE);
}

/*
 * pmap_clear_reference:
 *
 * Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(pa)
	vm_offset_t pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_reference(%x)", pa);
#endif
	pmap_changebit(pa, PG_U, FALSE);
}

/*
 * pmap_is_referenced:
 *
 * Return whether or not the specified physical page is referenced
 * by any physical maps.
 */
boolean_t
pmap_is_referenced(pa)
	vm_offset_t pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW) {
		boolean_t rv = pmap_testbit(pa, PG_U);
		printf("pmap_is_referenced(%x) -> %c", pa, "FT"[rv]);
		return(rv);
	}
#endif
	return(pmap_testbit(pa, PG_U));
}

/*
 * pmap_is_modified:
 *
 * Return whether or not the specified physical page is modified
 * by any physical maps.
 */
boolean_t
pmap_is_modified(pa)
	vm_offset_t pa;
{
#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW) {
		boolean_t rv = pmap_testbit(pa, PG_M);
		printf("pmap_is_modified(%x) -> %c", pa, "FT"[rv]);
		return(rv);
	}
#endif
	return(pmap_testbit(pa, PG_M));
}

vm_offset_t
pmap_phys_address(ppn)
	int ppn;
{
	return(i386_ptob(ppn));
}

/*
 * Miscellaneous support routines follow
 */

static
i386_protection_init()
{
	register int *kp, prot;

	kp = protection_codes;
	for (prot = 0; prot < 8; prot++) {
		switch (prot) {
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
			*kp++ = 0;
			break;
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
			*kp++ = PG_RO;
			break;
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
			*kp++ = PG_RW;
			break;
		}
	}
}
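/*
 * Editorial note (not from the original source): since the i386 PTE
 * has no execute bit and no write-only mode, the eight VM protection
 * combinations fold down to three encodings:
 *
 *	VM_PROT_NONE			-> 0	(invalid)
 *	any readable/executable combo	-> PG_RO
 *	anything writable		-> PG_RW
 *
 * which is exactly the table i386_protection_init above builds,
 * indexed by protection value in pte_prot().
 */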
1477 */ 1478 if (pv->pv_pmap != NULL) { 1479 for (; pv; pv = pv->pv_next) { 1480 pte = (int *) pmap_pte(pv->pv_pmap, pv->pv_va); 1481 ix = 0; 1482 do { 1483 if (*pte++ & bit) { 1484 splx(s); 1485 return(TRUE); 1486 } 1487 } while (++ix != i386pagesperpage); 1488 } 1489 } 1490 splx(s); 1491 return(FALSE); 1492 } 1493 1494 static 1495 pmap_changebit(pa, bit, setem) 1496 register vm_offset_t pa; 1497 int bit; 1498 boolean_t setem; 1499 { 1500 register pv_entry_t pv; 1501 register int *pte, npte, ix; 1502 vm_offset_t va; 1503 int s; 1504 boolean_t firstpage = TRUE; 1505 1506 #ifdef DEBUG 1507 if (pmapdebug & PDB_BITS) 1508 printf("pmap_changebit(%x, %x, %s)", 1509 pa, bit, setem ? "set" : "clear"); 1510 #endif 1511 if (pa < vm_first_phys || pa >= vm_last_phys) 1512 return; 1513 1514 pv = pa_to_pvh(pa); 1515 s = splimp(); 1516 /* 1517 * Clear saved attributes (modify, reference) 1518 */ 1519 if (!setem) 1520 pmap_attributes[pa_index(pa)] &= ~bit; 1521 /* 1522 * Loop over all current mappings setting/clearing as appropos 1523 * If setting RO do we need to clear the VAC? 1524 */ 1525 if (pv->pv_pmap != NULL) { 1526 #ifdef DEBUG 1527 int toflush = 0; 1528 #endif 1529 for (; pv; pv = pv->pv_next) { 1530 #ifdef DEBUG 1531 toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1; 1532 #endif 1533 va = pv->pv_va; 1534 1535 /* 1536 * XXX don't write protect pager mappings 1537 */ 1538 if (bit == PG_RO) { 1539 extern vm_offset_t pager_sva, pager_eva; 1540 1541 if (va >= pager_sva && va < pager_eva) 1542 continue; 1543 } 1544 1545 pte = (int *) pmap_pte(pv->pv_pmap, va); 1546 ix = 0; 1547 do { 1548 if (setem) 1549 npte = *pte | bit; 1550 else 1551 npte = *pte & ~bit; 1552 if (*pte != npte) { 1553 *pte = npte; 1554 /*TBIS(va);*/ 1555 } 1556 va += I386_PAGE_SIZE; 1557 pte++; 1558 } while (++ix != i386pagesperpage); 1559 1560 if (pv->pv_pmap == &curproc->p_vmspace->vm_pmap) 1561 pmap_activate(pv->pv_pmap, (struct pcb *)curproc->p_addr); 1562 } 1563 #ifdef somethinglikethis 1564 if (setem && bit == PG_RO && (pmapvacflush & PVF_PROTECT)) { 1565 if ((pmapvacflush & PVF_TOTAL) || toflush == 3) 1566 DCIA(); 1567 else if (toflush == 2) 1568 DCIS(); 1569 else 1570 DCIU(); 1571 } 1572 #endif 1573 } 1574 splx(s); 1575 } 1576 1577 #ifdef DEBUG 1578 pmap_pvdump(pa) 1579 vm_offset_t pa; 1580 { 1581 register pv_entry_t pv; 1582 1583 printf("pa %x", pa); 1584 for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next) { 1585 printf(" -> pmap %x, va %x, flags %x", 1586 pv->pv_pmap, pv->pv_va, pv->pv_flags); 1587 pads(pv->pv_pmap); 1588 } 1589 printf(" "); 1590 } 1591 1592 #ifdef notyet 1593 pmap_check_wiring(str, va) 1594 char *str; 1595 vm_offset_t va; 1596 { 1597 vm_map_entry_t entry; 1598 register int count, *pte; 1599 1600 va = trunc_page(va); 1601 if (!pmap_pde_v(pmap_pde(kernel_pmap, va)) || 1602 !pmap_pte_v(pmap_pte(kernel_pmap, va))) 1603 return; 1604 1605 if (!vm_map_lookup_entry(pt_map, va, &entry)) { 1606 pg("wired_check: entry for %x not found\n", va); 1607 return; 1608 } 1609 count = 0; 1610 for (pte = (int *)va; pte < (int *)(va+PAGE_SIZE); pte++) 1611 if (*pte) 1612 count++; 1613 if (entry->wired_count != count) 1614 pg("*%s*: %x: w%d/a%d\n", 1615 str, va, entry->wired_count, count); 1616 } 1617 #endif 1618 1619 /* print address space of pmap*/ 1620 pads(pm) pmap_t pm; { 1621 unsigned va, i, j; 1622 struct pte *ptep; 1623 1624 if(pm == kernel_pmap) return; 1625 for (i = 0; i < 1024; i++) 1626 if(pm->pm_pdir[i].pd_v) 1627 for (j = 0; j < 1024 ; j++) { 1628 va = (i<<22)+(j<<12); 1629 if (pm == kernel_pmap && va < 0xfe000000) 1630 
/* print address space of pmap */
pads(pm)
	pmap_t pm;
{
	unsigned va, i, j;
	struct pte *ptep;

	if (pm == kernel_pmap)
		return;
	for (i = 0; i < 1024; i++)
		if (pm->pm_pdir[i].pd_v)
			for (j = 0; j < 1024; j++) {
				va = (i<<22)+(j<<12);
				if (pm == kernel_pmap && va < 0xfe000000)
					continue;
				if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
					continue;
				ptep = pmap_pte(pm, va);
				if (pmap_pte_v(ptep))
					printf("%x:%x ", va, *(int *)ptep);
			}
}
#endif