1 /* $NetBSD: pmap.c,v 1.111 2010/10/15 15:55:53 tsutsui Exp $ */ 2 3 /*- 4 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jeremy Cooper. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /* 33 * XXX These comments aren't quite accurate. Need to change. 34 * The sun3x uses the MC68851 Memory Management Unit, which is built 35 * into the CPU. The 68851 maps virtual to physical addresses using 36 * a multi-level table lookup, which is stored in the very memory that 37 * it maps. The number of levels of lookup is configurable from one 38 * to four. In this implementation, we use three, named 'A' through 'C'. 39 * 40 * The MMU translates virtual addresses into physical addresses by 41 * traversing these tables in a process called a 'table walk'. The most 42 * significant 7 bits of the Virtual Address ('VA') being translated are 43 * used as an index into the level A table, whose base in physical memory 44 * is stored in a special MMU register, the 'CPU Root Pointer' or CRP. The 45 * address found at that index in the A table is used as the base 46 * address for the next table, the B table. The next six bits of the VA are 47 * used as an index into the B table, which in turn gives the base address 48 * of the third and final C table. 49 * 50 * The next six bits of the VA are used as an index into the C table to 51 * locate a Page Table Entry (PTE). The PTE is a physical address in memory 52 * to which the remaining 13 bits of the VA are added, producing the 53 * mapped physical address. 54 * 55 * To map the entire memory space in this manner would require 2114296 bytes 56 * of page tables per process - quite expensive. Instead we will 57 * allocate a fixed but considerably smaller space for the page tables at 58 * the time the VM system is initialized. When the pmap code is asked by 59 * the kernel to map a VA to a PA, it allocates tables as needed from this 60 * pool. When there are no more tables in the pool, tables are stolen 61 * from the oldest mapped entries in the tree. 
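 *
 * (In the code below, that pool is the trio of TAILQ lists a_pool, b_pool
 * and c_pool, and the "stealing" is done by get_a_table() and friends.
 * A rough sketch of the reclaim step, using names defined later in this
 * file:
 *
 *	tbl = TAILQ_FIRST(&a_pool);		take the first available manager
 *	if (tbl->at_parent != NULL)		still owned by some pmap?
 *		free_a_table(tbl, false);	unmap it before handing it out
 *
 * See get_a_table() for the real code.)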
 * This is only possible
 * because all memory mappings are stored in the kernel memory map
 * structures, independent of the pmap structures. A VA which references
 * one of these invalidated maps will cause a page fault. The kernel
 * will determine that the page fault was caused by a task using a valid
 * VA, but for some reason (which does not concern it), that address was
 * not mapped. It will ask the pmap code to re-map the entry and then
 * it will resume executing the faulting task.
 *
 * In this manner the most efficient use of the page table space is
 * achieved. Tasks which do not execute often will have their tables
 * stolen and reused by tasks which execute more frequently. The best
 * size for the page table pool will probably be determined by
 * experimentation.
 *
 * You read all of the comments so far. Good for you.
 * Now go play!
 */

/*** A Note About the 68851 Address Translation Cache
 * The MC68851 has a 64 entry cache, called the Address Translation Cache
 * or 'ATC'. This cache stores the most recently used page descriptors
 * accessed by the MMU when it does translations. Using a marker called a
 * 'task alias' the MMU can store the descriptors from 8 different table
 * spaces concurrently. The task alias is associated with the base
 * address of the level A table of that address space. When an address
 * space is currently active (the CRP currently points to its A table)
 * the only cached descriptors that will be obeyed are ones which have a
 * matching task alias of the current space associated with them.
 *
 * Since the cache is always consulted before any table lookups are done,
 * it is important that it accurately reflect the state of the MMU tables.
 * Whenever a change has been made to a table that has been loaded into
 * the MMU, the code must be sure to flush any cached entries that are
 * affected by the change. These instances are documented in the code at
 * various points.
 */
/*** A Note About the Note About the 68851 Address Translation Cache
 * 4 months into this code I discovered that the sun3x does not have
 * a MC68851 chip. Instead, it has a version of this MMU that is part of
 * the 68030 CPU.
 * Although it behaves very similarly to the 68851, it only has 1 task
 * alias and a 22 entry cache. So sadly (or happily), the first paragraph
 * of the previous note does not apply to the sun3x pmap.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.111 2010/10/15 15:55:53 tsutsui Exp $");

#include "opt_ddb.h"
#include "opt_pmap_debug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/kcore.h>

#include <uvm/uvm.h>

#include <machine/cpu.h>
#include <machine/kcore.h>
#include <machine/mon.h>
#include <machine/pmap.h>
#include <machine/pte.h>
#include <machine/vmparam.h>
#include <m68k/cacheops.h>

#include <sun3/sun3/cache.h>
#include <sun3/sun3/machdep.h>

#include "pmap_pvt.h"

/* XXX - What headers declare these?
*/ 137 extern struct pcb *curpcb; 138 139 /* Defined in locore.s */ 140 extern char kernel_text[]; 141 142 /* Defined by the linker */ 143 extern char etext[], edata[], end[]; 144 extern char *esym; /* DDB */ 145 146 /*************************** DEBUGGING DEFINITIONS *********************** 147 * Macros, preprocessor defines and variables used in debugging can make * 148 * code hard to read. Anything used exclusively for debugging purposes * 149 * is defined here to avoid having such mess scattered around the file. * 150 *************************************************************************/ 151 #ifdef PMAP_DEBUG 152 /* 153 * To aid the debugging process, macros should be expanded into smaller steps 154 * that accomplish the same goal, yet provide convenient places for placing 155 * breakpoints. When this code is compiled with PMAP_DEBUG mode defined, the 156 * 'INLINE' keyword is defined to an empty string. This way, any function 157 * defined to be a 'static INLINE' will become 'outlined' and compiled as 158 * a separate function, which is much easier to debug. 159 */ 160 #define INLINE /* nothing */ 161 162 /* 163 * It is sometimes convenient to watch the activity of a particular table 164 * in the system. The following variables are used for that purpose. 165 */ 166 a_tmgr_t *pmap_watch_atbl = 0; 167 b_tmgr_t *pmap_watch_btbl = 0; 168 c_tmgr_t *pmap_watch_ctbl = 0; 169 170 int pmap_debug = 0; 171 #define DPRINT(args) if (pmap_debug) printf args 172 173 #else /********** Stuff below is defined if NOT debugging **************/ 174 175 #define INLINE inline 176 #define DPRINT(args) /* nada */ 177 178 #endif /* PMAP_DEBUG */ 179 /*********************** END OF DEBUGGING DEFINITIONS ********************/ 180 181 /*** Management Structure - Memory Layout 182 * For every MMU table in the sun3x pmap system there must be a way to 183 * manage it; we must know which process is using it, what other tables 184 * depend on it, and whether or not it contains any locked pages. This 185 * is solved by the creation of 'table management' or 'tmgr' 186 * structures. One for each MMU table in the system. 
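 *
 * (Roughly, each manager records which MMU table it controls, who owns it
 * and how it is being used: an a_tmgr_t carries at_dtbl (its MMU A table),
 * at_parent (the owning pmap), at_wcnt/at_ecnt (wired and valid entry
 * counts) and at_link (pool linkage), with b_tmgr_t and c_tmgr_t being
 * analogous. The actual structure definitions live in the pmap private
 * header, not in this file.)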
187 * 188 * MAP OF MEMORY USED BY THE PMAP SYSTEM 189 * 190 * towards lower memory 191 * kernAbase -> +-------------------------------------------------------+ 192 * | Kernel MMU A level table | 193 * kernBbase -> +-------------------------------------------------------+ 194 * | Kernel MMU B level tables | 195 * kernCbase -> +-------------------------------------------------------+ 196 * | | 197 * | Kernel MMU C level tables | 198 * | | 199 * mmuCbase -> +-------------------------------------------------------+ 200 * | User MMU C level tables | 201 * mmuAbase -> +-------------------------------------------------------+ 202 * | | 203 * | User MMU A level tables | 204 * | | 205 * mmuBbase -> +-------------------------------------------------------+ 206 * | User MMU B level tables | 207 * tmgrAbase -> +-------------------------------------------------------+ 208 * | TMGR A level table structures | 209 * tmgrBbase -> +-------------------------------------------------------+ 210 * | TMGR B level table structures | 211 * tmgrCbase -> +-------------------------------------------------------+ 212 * | TMGR C level table structures | 213 * pvbase -> +-------------------------------------------------------+ 214 * | Physical to Virtual mapping table (list heads) | 215 * pvebase -> +-------------------------------------------------------+ 216 * | Physical to Virtual mapping table (list elements) | 217 * | | 218 * +-------------------------------------------------------+ 219 * towards higher memory 220 * 221 * For every A table in the MMU A area, there will be a corresponding 222 * a_tmgr structure in the TMGR A area. The same will be true for 223 * the B and C tables. This arrangement will make it easy to find the 224 * controling tmgr structure for any table in the system by use of 225 * (relatively) simple macros. 226 */ 227 228 /* 229 * Global variables for storing the base addresses for the areas 230 * labeled above. 231 */ 232 static vaddr_t kernAphys; 233 static mmu_long_dte_t *kernAbase; 234 static mmu_short_dte_t *kernBbase; 235 static mmu_short_pte_t *kernCbase; 236 static mmu_short_pte_t *mmuCbase; 237 static mmu_short_dte_t *mmuBbase; 238 static mmu_long_dte_t *mmuAbase; 239 static a_tmgr_t *Atmgrbase; 240 static b_tmgr_t *Btmgrbase; 241 static c_tmgr_t *Ctmgrbase; 242 static pv_t *pvbase; 243 static pv_elem_t *pvebase; 244 static struct pmap kernel_pmap; 245 struct pmap *const kernel_pmap_ptr = &kernel_pmap; 246 247 /* 248 * This holds the CRP currently loaded into the MMU. 249 */ 250 struct mmu_rootptr kernel_crp; 251 252 /* 253 * Just all around global variables. 254 */ 255 static TAILQ_HEAD(a_pool_head_struct, a_tmgr_struct) a_pool; 256 static TAILQ_HEAD(b_pool_head_struct, b_tmgr_struct) b_pool; 257 static TAILQ_HEAD(c_pool_head_struct, c_tmgr_struct) c_pool; 258 259 260 /* 261 * Flags used to mark the safety/availability of certain operations or 262 * resources. 263 */ 264 /* Safe to use pmap_bootstrap_alloc(). */ 265 static bool bootstrap_alloc_enabled = false; 266 /* Temporary virtual pages are in use */ 267 int tmp_vpages_inuse; 268 269 /* 270 * XXX: For now, retain the traditional variables that were 271 * used in the old pmap/vm interface (without NONCONTIG). 272 */ 273 /* Kernel virtual address space available: */ 274 vaddr_t virtual_avail, virtual_end; 275 /* Physical address space available: */ 276 paddr_t avail_start, avail_end; 277 278 /* This keep track of the end of the contiguously mapped range. 
 */
vaddr_t virtual_contig_end;

/* Physical address used by pmap_next_page() */
paddr_t avail_next;

/* These are used by pmap_copy_page(), etc. */
vaddr_t tmp_vpages[2];

/* memory pool for pmap structures */
struct pool pmap_pmap_pool;

/*
 * The 3/80 is the only member of the sun3x family that has non-contiguous
 * physical memory. Memory is divided into 4 banks which are physically
 * locatable on the system board. Although the size of these banks varies
 * with the size of memory they contain, their base addresses are
 * permanently fixed. The following structure, which describes these
 * banks, is initialized by pmap_bootstrap() after it reads from a similar
 * structure provided by the ROM Monitor.
 *
 * For the other machines in the sun3x architecture which do have contiguous
 * RAM, this list will have only one entry, which will describe the entire
 * range of available memory.
 */
struct pmap_physmem_struct avail_mem[SUN3X_NPHYS_RAM_SEGS];
u_int total_phys_mem;

/*************************************************************************/

/*
 * XXX - Should "tune" these based on statistics.
 *
 * My first guess about the relative numbers of these needed is
 * based on the fact that a "typical" process will have several
 * pages mapped at low virtual addresses (text, data, bss), then
 * some mapped shared libraries, and then some stack pages mapped
 * near the high end of the VA space. Each process can use only
 * one A table, and most will use only two B tables (maybe three)
 * and probably about four C tables. Therefore, the first guess
 * at the relative numbers of these needed is 1:2:4 -gwr
 *
 * The number of C tables needed is closely related to the amount
 * of physical memory available plus a certain amount attributable
 * to the use of double mappings. With a few simulation statistics
 * we can find a reasonably good estimation of this unknown value.
 * Armed with that and the above ratios, we have a good idea of what
 * is needed at each level. -j
 *
 * Note: It is not physical memory size, but the total mapped
 * virtual space required by the combined working sets of all the
 * currently _runnable_ processes. (Sleeping ones don't count.)
 * The amount of physical memory should be irrelevant. -gwr
 */
#ifdef FIXED_NTABLES
#define NUM_A_TABLES 16
#define NUM_B_TABLES 32
#define NUM_C_TABLES 64
#else
unsigned int NUM_A_TABLES, NUM_B_TABLES, NUM_C_TABLES;
#endif /* FIXED_NTABLES */

/*
 * This determines our total virtual mapping capacity.
 * Yes, it is a FIXED value so we can pre-allocate.
 */
#define NUM_USER_PTES (NUM_C_TABLES * MMU_C_TBL_SIZE)

/*
 * The size of the Kernel Virtual Address Space (KVAS)
 * for purposes of MMU table allocation is -KERNBASE
 * (length from KERNBASE to 0xFFFFffff)
 */
#define KVAS_SIZE (-KERNBASE3X)

/* Numbers of kernel MMU tables to support KVAS_SIZE.
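 *
 * For example (illustrative numbers only, assuming KERNBASE3X is 0xF8000000
 * and the 7/6/6/13 VA split described at the top of this file, i.e. TIA/TIB/
 * TIC shifts of 25, 19 and 13):
 *
 *	KVAS_SIZE     = 0x08000000		(128MB of kernel VA)
 *	KERN_B_TABLES = 0x08000000 >> 25 = 4	(one B table per kernel A entry)
 *	KERN_C_TABLES = 0x08000000 >> 19 = 256
 *	NUM_KERN_PTES = 0x08000000 >> 13 = 16384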
 */
#define KERN_B_TABLES (KVAS_SIZE >> MMU_TIA_SHIFT)
#define KERN_C_TABLES (KVAS_SIZE >> MMU_TIB_SHIFT)
#define NUM_KERN_PTES (KVAS_SIZE >> MMU_TIC_SHIFT)

/*************************** MISCELLANEOUS MACROS *************************/
#define pmap_lock(pmap) simple_lock(&pmap->pm_lock)
#define pmap_unlock(pmap) simple_unlock(&pmap->pm_lock)
#define pmap_add_ref(pmap) ++pmap->pm_refcount
#define pmap_del_ref(pmap) --pmap->pm_refcount
#define pmap_refcount(pmap) pmap->pm_refcount

void *pmap_bootstrap_alloc(int);

static INLINE void *mmu_ptov(paddr_t);
static INLINE paddr_t mmu_vtop(void *);

#if 0
static INLINE a_tmgr_t *mmuA2tmgr(mmu_long_dte_t *);
#endif /* 0 */
static INLINE b_tmgr_t *mmuB2tmgr(mmu_short_dte_t *);
static INLINE c_tmgr_t *mmuC2tmgr(mmu_short_pte_t *);

static INLINE pv_t *pa2pv(paddr_t);
static INLINE int pteidx(mmu_short_pte_t *);
static INLINE pmap_t current_pmap(void);

/*
 * We can always convert between virtual and physical addresses
 * for anything in the range [KERNBASE ... avail_start] because
 * that range is GUARANTEED to be mapped linearly.
 * We rely heavily upon this feature!
 */
static INLINE void *
mmu_ptov(paddr_t pa)
{
	vaddr_t va;

	va = (pa + KERNBASE3X);
#ifdef PMAP_DEBUG
	if ((va < KERNBASE3X) || (va >= virtual_contig_end))
		panic("mmu_ptov");
#endif
	return (void *)va;
}

static INLINE paddr_t
mmu_vtop(void *vva)
{
	vaddr_t va;

	va = (vaddr_t)vva;
#ifdef PMAP_DEBUG
	if ((va < KERNBASE3X) || (va >= virtual_contig_end))
		panic("mmu_vtop");
#endif
	return va - KERNBASE3X;
}

/*
 * These macros map MMU tables to their corresponding manager structures.
 * They are needed quite often because many of the pointers in the pmap
 * system reference MMU tables and not the structures that control them.
 * There needs to be a way to find one when given the other and these
 * macros do so by taking advantage of the memory layout described above.
 * Here's a quick step through the first macro, mmuA2tmgr():
 *
 * 1) find the offset of the given MMU A table from the base of its table
 *    pool (table - mmuAbase).
 * 2) convert this offset into a table index by dividing it by the
 *    size of one MMU 'A' table. (sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE)
 * 3) use this index to select the corresponding 'A' table manager
 *    structure from the 'A' table manager pool (Atmgrbase[index]).
 */
/* This function is not currently used. */
#if 0
static INLINE a_tmgr_t *
mmuA2tmgr(mmu_long_dte_t *mmuAtbl)
{
	int idx;

	/* Which table is this in? */
	idx = (mmuAtbl - mmuAbase) / MMU_A_TBL_SIZE;
#ifdef PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_A_TABLES))
		panic("mmuA2tmgr");
#endif
	return &Atmgrbase[idx];
}
#endif /* 0 */

static INLINE b_tmgr_t *
mmuB2tmgr(mmu_short_dte_t *mmuBtbl)
{
	int idx;

	/* Which table is this in? */
	idx = (mmuBtbl - mmuBbase) / MMU_B_TBL_SIZE;
#ifdef PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_B_TABLES))
		panic("mmuB2tmgr");
#endif
	return &Btmgrbase[idx];
}

/* mmuC2tmgr INTERNAL
 **
 * Given a pte known to belong to a C table, return the address of
 * that table's management structure.
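 *
 * (Example, assuming MMU_C_TBL_SIZE is 64 as implied by the 6-bit C index
 * described at the top of this file: a pte at mmuCbase + 70 yields
 * idx = 70 / 64 = 1, so Ctmgrbase[1] is its manager.)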
462 */ 463 static INLINE c_tmgr_t * 464 mmuC2tmgr(mmu_short_pte_t *mmuCtbl) 465 { 466 int idx; 467 468 /* Which table is this in? */ 469 idx = (mmuCtbl - mmuCbase) / MMU_C_TBL_SIZE; 470 #ifdef PMAP_DEBUG 471 if ((idx < 0) || (idx >= NUM_C_TABLES)) 472 panic("mmuC2tmgr"); 473 #endif 474 return &Ctmgrbase[idx]; 475 } 476 477 /* This is now a function call below. 478 * #define pa2pv(pa) \ 479 * (&pvbase[(unsigned long)\ 480 * m68k_btop(pa)\ 481 * ]) 482 */ 483 484 /* pa2pv INTERNAL 485 ** 486 * Return the pv_list_head element which manages the given physical 487 * address. 488 */ 489 static INLINE pv_t * 490 pa2pv(paddr_t pa) 491 { 492 struct pmap_physmem_struct *bank; 493 int idx; 494 495 bank = &avail_mem[0]; 496 while (pa >= bank->pmem_end) 497 bank = bank->pmem_next; 498 499 pa -= bank->pmem_start; 500 idx = bank->pmem_pvbase + m68k_btop(pa); 501 #ifdef PMAP_DEBUG 502 if ((idx < 0) || (idx >= physmem)) 503 panic("pa2pv"); 504 #endif 505 return &pvbase[idx]; 506 } 507 508 /* pteidx INTERNAL 509 ** 510 * Return the index of the given PTE within the entire fixed table of 511 * PTEs. 512 */ 513 static INLINE int 514 pteidx(mmu_short_pte_t *pte) 515 { 516 517 return pte - kernCbase; 518 } 519 520 /* 521 * This just offers a place to put some debugging checks, 522 * and reduces the number of places "curlwp" appears... 523 */ 524 static INLINE pmap_t 525 current_pmap(void) 526 { 527 struct vmspace *vm; 528 struct vm_map *map; 529 pmap_t pmap; 530 531 vm = curproc->p_vmspace; 532 map = &vm->vm_map; 533 pmap = vm_map_pmap(map); 534 535 return pmap; 536 } 537 538 539 /*************************** FUNCTION DEFINITIONS ************************ 540 * These appear here merely for the compiler to enforce type checking on * 541 * all function calls. * 542 *************************************************************************/ 543 544 /* 545 * Internal functions 546 */ 547 a_tmgr_t *get_a_table(void); 548 b_tmgr_t *get_b_table(void); 549 c_tmgr_t *get_c_table(void); 550 int free_a_table(a_tmgr_t *, bool); 551 int free_b_table(b_tmgr_t *, bool); 552 int free_c_table(c_tmgr_t *, bool); 553 554 void pmap_bootstrap_aalign(int); 555 void pmap_alloc_usermmu(void); 556 void pmap_alloc_usertmgr(void); 557 void pmap_alloc_pv(void); 558 void pmap_init_a_tables(void); 559 void pmap_init_b_tables(void); 560 void pmap_init_c_tables(void); 561 void pmap_init_pv(void); 562 void pmap_clear_pv(paddr_t, int); 563 static INLINE bool is_managed(paddr_t); 564 565 bool pmap_remove_a(a_tmgr_t *, vaddr_t, vaddr_t); 566 bool pmap_remove_b(b_tmgr_t *, vaddr_t, vaddr_t); 567 bool pmap_remove_c(c_tmgr_t *, vaddr_t, vaddr_t); 568 void pmap_remove_pte(mmu_short_pte_t *); 569 570 void pmap_enter_kernel(vaddr_t, paddr_t, vm_prot_t); 571 static INLINE void pmap_remove_kernel(vaddr_t, vaddr_t); 572 static INLINE void pmap_protect_kernel(vaddr_t, vaddr_t, vm_prot_t); 573 static INLINE bool pmap_extract_kernel(vaddr_t, paddr_t *); 574 vaddr_t pmap_get_pteinfo(u_int, pmap_t *, c_tmgr_t **); 575 static INLINE int pmap_dereference(pmap_t); 576 577 bool pmap_stroll(pmap_t, vaddr_t, a_tmgr_t **, b_tmgr_t **, c_tmgr_t **, 578 mmu_short_pte_t **, int *, int *, int *); 579 void pmap_bootstrap_copyprom(void); 580 void pmap_takeover_mmu(void); 581 void pmap_bootstrap_setprom(void); 582 static void pmap_page_upload(void); 583 584 #ifdef PMAP_DEBUG 585 /* Debugging function definitions */ 586 void pv_list(paddr_t, int); 587 #endif /* PMAP_DEBUG */ 588 589 /** Interface functions 590 ** - functions required by the Mach VM Pmap interface, with 
MACHINE_CONTIG 591 ** defined. 592 ** The new UVM doesn't require them so now INTERNAL. 593 **/ 594 static INLINE void pmap_pinit(pmap_t); 595 static INLINE void pmap_release(pmap_t); 596 597 /********************************** CODE ******************************** 598 * Functions that are called from other parts of the kernel are labeled * 599 * as 'INTERFACE' functions. Functions that are only called from * 600 * within the pmap module are labeled as 'INTERNAL' functions. * 601 * Functions that are internal, but are not (currently) used at all are * 602 * labeled 'INTERNAL_X'. * 603 ************************************************************************/ 604 605 /* pmap_bootstrap INTERNAL 606 ** 607 * Initializes the pmap system. Called at boot time from 608 * locore2.c:_vm_init() 609 * 610 * Reminder: having a pmap_bootstrap_alloc() and also having the VM 611 * system implement pmap_steal_memory() is redundant. 612 * Don't release this code without removing one or the other! 613 */ 614 void 615 pmap_bootstrap(vaddr_t nextva) 616 { 617 struct physmemory *membank; 618 struct pmap_physmem_struct *pmap_membank; 619 vaddr_t va, eva; 620 paddr_t pa; 621 int b, c, i, j; /* running table counts */ 622 int size, resvmem; 623 624 /* 625 * This function is called by __bootstrap after it has 626 * determined the type of machine and made the appropriate 627 * patches to the ROM vectors (XXX- I don't quite know what I meant 628 * by that.) It allocates and sets up enough of the pmap system 629 * to manage the kernel's address space. 630 */ 631 632 /* 633 * Determine the range of kernel virtual and physical 634 * space available. Note that we ABSOLUTELY DEPEND on 635 * the fact that the first bank of memory (4MB) is 636 * mapped linearly to KERNBASE (which we guaranteed in 637 * the first instructions of locore.s). 638 * That is plenty for our bootstrap work. 639 */ 640 virtual_avail = m68k_round_page(nextva); 641 virtual_contig_end = KERNBASE3X + 0x400000; /* +4MB */ 642 virtual_end = VM_MAX_KERNEL_ADDRESS; 643 /* Don't need avail_start til later. */ 644 645 /* We may now call pmap_bootstrap_alloc(). */ 646 bootstrap_alloc_enabled = true; 647 648 /* 649 * This is a somewhat unwrapped loop to deal with 650 * copying the PROM's 'phsymem' banks into the pmap's 651 * banks. The following is always assumed: 652 * 1. There is always at least one bank of memory. 653 * 2. There is always a last bank of memory, and its 654 * pmem_next member must be set to NULL. 655 */ 656 membank = romVectorPtr->v_physmemory; 657 pmap_membank = avail_mem; 658 total_phys_mem = 0; 659 660 for (;;) { /* break on !membank */ 661 pmap_membank->pmem_start = membank->address; 662 pmap_membank->pmem_end = membank->address + membank->size; 663 total_phys_mem += membank->size; 664 membank = membank->next; 665 if (!membank) 666 break; 667 /* This silly syntax arises because pmap_membank 668 * is really a pre-allocated array, but it is put into 669 * use as a linked list. 670 */ 671 pmap_membank->pmem_next = pmap_membank + 1; 672 pmap_membank = pmap_membank->pmem_next; 673 } 674 /* This is the last element. */ 675 pmap_membank->pmem_next = NULL; 676 677 /* 678 * Note: total_phys_mem, physmem represent 679 * actual physical memory, including that 680 * reserved for the PROM monitor. 681 */ 682 physmem = btoc(total_phys_mem); 683 684 /* 685 * Avail_end is set to the first byte of physical memory 686 * after the end of the last bank. We use this only to 687 * determine if a physical address is "managed" memory. 
688 * This address range should be reduced to prevent the 689 * physical pages needed by the PROM monitor from being used 690 * in the VM system. 691 */ 692 resvmem = total_phys_mem - *(romVectorPtr->memoryAvail); 693 resvmem = m68k_round_page(resvmem); 694 avail_end = pmap_membank->pmem_end - resvmem; 695 696 /* 697 * First allocate enough kernel MMU tables to map all 698 * of kernel virtual space from KERNBASE to 0xFFFFFFFF. 699 * Note: All must be aligned on 256 byte boundaries. 700 * Start with the level-A table (one of those). 701 */ 702 size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE; 703 kernAbase = pmap_bootstrap_alloc(size); 704 memset(kernAbase, 0, size); 705 706 /* Now the level-B kernel tables... */ 707 size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * KERN_B_TABLES; 708 kernBbase = pmap_bootstrap_alloc(size); 709 memset(kernBbase, 0, size); 710 711 /* Now the level-C kernel tables... */ 712 size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * KERN_C_TABLES; 713 kernCbase = pmap_bootstrap_alloc(size); 714 memset(kernCbase, 0, size); 715 /* 716 * Note: In order for the PV system to work correctly, the kernel 717 * and user-level C tables must be allocated contiguously. 718 * Nothing should be allocated between here and the allocation of 719 * mmuCbase below. XXX: Should do this as one allocation, and 720 * then compute a pointer for mmuCbase instead of this... 721 * 722 * Allocate user MMU tables. 723 * These must be contiguous with the preceding. 724 */ 725 726 #ifndef FIXED_NTABLES 727 /* 728 * The number of user-level C tables that should be allocated is 729 * related to the size of physical memory. In general, there should 730 * be enough tables to map four times the amount of available RAM. 731 * The extra amount is needed because some table space is wasted by 732 * fragmentation. 733 */ 734 NUM_C_TABLES = (total_phys_mem * 4) / (MMU_C_TBL_SIZE * MMU_PAGE_SIZE); 735 NUM_B_TABLES = NUM_C_TABLES / 2; 736 NUM_A_TABLES = NUM_B_TABLES / 2; 737 #endif /* !FIXED_NTABLES */ 738 739 size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * NUM_C_TABLES; 740 mmuCbase = pmap_bootstrap_alloc(size); 741 742 size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * NUM_B_TABLES; 743 mmuBbase = pmap_bootstrap_alloc(size); 744 745 size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE * NUM_A_TABLES; 746 mmuAbase = pmap_bootstrap_alloc(size); 747 748 /* 749 * Fill in the never-changing part of the kernel tables. 750 * For simplicity, the kernel's mappings will be editable as a 751 * flat array of page table entries at kernCbase. The 752 * higher level 'A' and 'B' tables must be initialized to point 753 * to this lower one. 754 */ 755 b = c = 0; 756 757 /* 758 * Invalidate all mappings below KERNBASE in the A table. 759 * This area has already been zeroed out, but it is good 760 * practice to explicitly show that we are interpreting 761 * it as a list of A table descriptors. 762 */ 763 for (i = 0; i < MMU_TIA(KERNBASE3X); i++) { 764 kernAbase[i].addr.raw = 0; 765 } 766 767 /* 768 * Set up the kernel A and B tables so that they will reference the 769 * correct spots in the contiguous table of PTEs allocated for the 770 * kernel's virtual memory space. 
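 *
 * (Illustrative numbers, assuming KERNBASE3X is 0xF8000000 and a 7-bit
 * A index: MMU_TIA(KERNBASE3X) is then 124, so the loop below fills A
 * entries 124..127, pointing each one at a kernel B table and stepping
 * 'c' through the contiguous kernCbase PTE array one C table at a time.)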
771 */ 772 for (i = MMU_TIA(KERNBASE3X); i < MMU_A_TBL_SIZE; i++) { 773 kernAbase[i].attr.raw = 774 MMU_LONG_DTE_LU | MMU_LONG_DTE_SUPV | MMU_DT_SHORT; 775 kernAbase[i].addr.raw = mmu_vtop(&kernBbase[b]); 776 777 for (j = 0; j < MMU_B_TBL_SIZE; j++) { 778 kernBbase[b + j].attr.raw = 779 mmu_vtop(&kernCbase[c]) | MMU_DT_SHORT; 780 c += MMU_C_TBL_SIZE; 781 } 782 b += MMU_B_TBL_SIZE; 783 } 784 785 pmap_alloc_usermmu(); /* Allocate user MMU tables. */ 786 pmap_alloc_usertmgr(); /* Allocate user MMU table managers.*/ 787 pmap_alloc_pv(); /* Allocate physical->virtual map. */ 788 789 /* 790 * We are now done with pmap_bootstrap_alloc(). Round up 791 * `virtual_avail' to the nearest page, and set the flag 792 * to prevent use of pmap_bootstrap_alloc() hereafter. 793 */ 794 pmap_bootstrap_aalign(PAGE_SIZE); 795 bootstrap_alloc_enabled = false; 796 797 /* 798 * Now that we are done with pmap_bootstrap_alloc(), we 799 * must save the virtual and physical addresses of the 800 * end of the linearly mapped range, which are stored in 801 * virtual_contig_end and avail_start, respectively. 802 * These variables will never change after this point. 803 */ 804 virtual_contig_end = virtual_avail; 805 avail_start = virtual_avail - KERNBASE3X; 806 807 /* 808 * `avail_next' is a running pointer used by pmap_next_page() to 809 * keep track of the next available physical page to be handed 810 * to the VM system during its initialization, in which it 811 * asks for physical pages, one at a time. 812 */ 813 avail_next = avail_start; 814 815 /* 816 * Now allocate some virtual addresses, but not the physical pages 817 * behind them. Note that virtual_avail is already page-aligned. 818 * 819 * tmp_vpages[] is an array of two virtual pages used for temporary 820 * kernel mappings in the pmap module to facilitate various physical 821 * address-oritented operations. 822 */ 823 tmp_vpages[0] = virtual_avail; 824 virtual_avail += PAGE_SIZE; 825 tmp_vpages[1] = virtual_avail; 826 virtual_avail += PAGE_SIZE; 827 828 /** Initialize the PV system **/ 829 pmap_init_pv(); 830 831 /* 832 * Fill in the kernel_pmap structure and kernel_crp. 833 */ 834 kernAphys = mmu_vtop(kernAbase); 835 kernel_pmap.pm_a_tmgr = NULL; 836 kernel_pmap.pm_a_phys = kernAphys; 837 kernel_pmap.pm_refcount = 1; /* always in use */ 838 simple_lock_init(&kernel_pmap.pm_lock); 839 840 kernel_crp.rp_attr = MMU_LONG_DTE_LU | MMU_DT_LONG; 841 kernel_crp.rp_addr = kernAphys; 842 843 /* 844 * Now pmap_enter_kernel() may be used safely and will be 845 * the main interface used hereafter to modify the kernel's 846 * virtual address space. Note that since we are still running 847 * under the PROM's address table, none of these table modifications 848 * actually take effect until pmap_takeover_mmu() is called. 849 * 850 * Note: Our tables do NOT have the PROM linear mappings! 851 * Only the mappings created here exist in our tables, so 852 * remember to map anything we expect to use. 853 */ 854 va = (vaddr_t)KERNBASE3X; 855 pa = 0; 856 857 /* 858 * The first page of the kernel virtual address space is the msgbuf 859 * page. The page attributes (data, non-cached) are set here, while 860 * the address is assigned to this global pointer in cpu_startup(). 861 * It is non-cached, mostly due to paranoia. 862 */ 863 pmap_enter_kernel(va, pa|PMAP_NC, VM_PROT_ALL); 864 va += PAGE_SIZE; 865 pa += PAGE_SIZE; 866 867 /* Next page is used as the temporary stack. 
*/ 868 pmap_enter_kernel(va, pa, VM_PROT_ALL); 869 va += PAGE_SIZE; 870 pa += PAGE_SIZE; 871 872 /* 873 * Map all of the kernel's text segment as read-only and cacheable. 874 * (Cacheable is implied by default). Unfortunately, the last bytes 875 * of kernel text and the first bytes of kernel data will often be 876 * sharing the same page. Therefore, the last page of kernel text 877 * has to be mapped as read/write, to accommodate the data. 878 */ 879 eva = m68k_trunc_page((vaddr_t)etext); 880 for (; va < eva; va += PAGE_SIZE, pa += PAGE_SIZE) 881 pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_EXECUTE); 882 883 /* 884 * Map all of the kernel's data as read/write and cacheable. 885 * This includes: data, BSS, symbols, and everything in the 886 * contiguous memory used by pmap_bootstrap_alloc() 887 */ 888 for (; pa < avail_start; va += PAGE_SIZE, pa += PAGE_SIZE) 889 pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_WRITE); 890 891 /* 892 * At this point we are almost ready to take over the MMU. But first 893 * we must save the PROM's address space in our map, as we call its 894 * routines and make references to its data later in the kernel. 895 */ 896 pmap_bootstrap_copyprom(); 897 pmap_takeover_mmu(); 898 pmap_bootstrap_setprom(); 899 900 /* Notify the VM system of our page size. */ 901 uvmexp.pagesize = PAGE_SIZE; 902 uvm_setpagesize(); 903 904 pmap_page_upload(); 905 } 906 907 908 /* pmap_alloc_usermmu INTERNAL 909 ** 910 * Called from pmap_bootstrap() to allocate MMU tables that will 911 * eventually be used for user mappings. 912 */ 913 void 914 pmap_alloc_usermmu(void) 915 { 916 917 /* XXX: Moved into caller. */ 918 } 919 920 /* pmap_alloc_pv INTERNAL 921 ** 922 * Called from pmap_bootstrap() to allocate the physical 923 * to virtual mapping list. Each physical page of memory 924 * in the system has a corresponding element in this list. 925 */ 926 void 927 pmap_alloc_pv(void) 928 { 929 int i; 930 unsigned int total_mem; 931 932 /* 933 * Allocate a pv_head structure for every page of physical 934 * memory that will be managed by the system. Since memory on 935 * the 3/80 is non-contiguous, we cannot arrive at a total page 936 * count by subtraction of the lowest available address from the 937 * highest, but rather we have to step through each memory 938 * bank and add the number of pages in each to the total. 939 * 940 * At this time we also initialize the offset of each bank's 941 * starting pv_head within the pv_head list so that the physical 942 * memory state routines (pmap_is_referenced(), 943 * pmap_is_modified(), et al.) can quickly find coresponding 944 * pv_heads in spite of the non-contiguity. 945 */ 946 total_mem = 0; 947 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) { 948 avail_mem[i].pmem_pvbase = m68k_btop(total_mem); 949 total_mem += avail_mem[i].pmem_end - avail_mem[i].pmem_start; 950 if (avail_mem[i].pmem_next == NULL) 951 break; 952 } 953 pvbase = (pv_t *)pmap_bootstrap_alloc(sizeof(pv_t) * 954 m68k_btop(total_phys_mem)); 955 } 956 957 /* pmap_alloc_usertmgr INTERNAL 958 ** 959 * Called from pmap_bootstrap() to allocate the structures which 960 * facilitate management of user MMU tables. Each user MMU table 961 * in the system has one such structure associated with it. 962 */ 963 void 964 pmap_alloc_usertmgr(void) 965 { 966 /* Allocate user MMU table managers */ 967 /* It would be a lot simpler to just make these BSS, but */ 968 /* we may want to change their size at boot time... 
-j */ 969 Atmgrbase = 970 (a_tmgr_t *)pmap_bootstrap_alloc(sizeof(a_tmgr_t) * NUM_A_TABLES); 971 Btmgrbase = 972 (b_tmgr_t *)pmap_bootstrap_alloc(sizeof(b_tmgr_t) * NUM_B_TABLES); 973 Ctmgrbase = 974 (c_tmgr_t *)pmap_bootstrap_alloc(sizeof(c_tmgr_t) * NUM_C_TABLES); 975 976 /* 977 * Allocate PV list elements for the physical to virtual 978 * mapping system. 979 */ 980 pvebase = (pv_elem_t *)pmap_bootstrap_alloc(sizeof(pv_elem_t) * 981 (NUM_USER_PTES + NUM_KERN_PTES)); 982 } 983 984 /* pmap_bootstrap_copyprom() INTERNAL 985 ** 986 * Copy the PROM mappings into our own tables. Note, we 987 * can use physical addresses until __bootstrap returns. 988 */ 989 void 990 pmap_bootstrap_copyprom(void) 991 { 992 struct sunromvec *romp; 993 int *mon_ctbl; 994 mmu_short_pte_t *kpte; 995 int i, len; 996 997 romp = romVectorPtr; 998 999 /* 1000 * Copy the mappings in SUN3X_MON_KDB_BASE...SUN3X_MONEND 1001 * Note: mon_ctbl[0] maps SUN3X_MON_KDB_BASE 1002 */ 1003 mon_ctbl = *romp->monptaddr; 1004 i = m68k_btop(SUN3X_MON_KDB_BASE - KERNBASE3X); 1005 kpte = &kernCbase[i]; 1006 len = m68k_btop(SUN3X_MONEND - SUN3X_MON_KDB_BASE); 1007 1008 for (i = 0; i < len; i++) { 1009 kpte[i].attr.raw = mon_ctbl[i]; 1010 } 1011 1012 /* 1013 * Copy the mappings at MON_DVMA_BASE (to the end). 1014 * Note, in here, mon_ctbl[0] maps MON_DVMA_BASE. 1015 * Actually, we only want the last page, which the 1016 * PROM has set up for use by the "ie" driver. 1017 * (The i82686 needs its SCP there.) 1018 * If we copy all the mappings, pmap_enter_kernel 1019 * may complain about finding valid PTEs that are 1020 * not recorded in our PV lists... 1021 */ 1022 mon_ctbl = *romp->shadowpteaddr; 1023 i = m68k_btop(SUN3X_MON_DVMA_BASE - KERNBASE3X); 1024 kpte = &kernCbase[i]; 1025 len = m68k_btop(SUN3X_MON_DVMA_SIZE); 1026 for (i = (len - 1); i < len; i++) { 1027 kpte[i].attr.raw = mon_ctbl[i]; 1028 } 1029 } 1030 1031 /* pmap_takeover_mmu INTERNAL 1032 ** 1033 * Called from pmap_bootstrap() after it has copied enough of the 1034 * PROM mappings into the kernel map so that we can use our own 1035 * MMU table. 1036 */ 1037 void 1038 pmap_takeover_mmu(void) 1039 { 1040 1041 loadcrp(&kernel_crp); 1042 } 1043 1044 /* pmap_bootstrap_setprom() INTERNAL 1045 ** 1046 * Set the PROM mappings so it can see kernel space. 1047 * Note that physical addresses are used here, which 1048 * we can get away with because this runs with the 1049 * low 1GB set for transparent translation. 1050 */ 1051 void 1052 pmap_bootstrap_setprom(void) 1053 { 1054 mmu_long_dte_t *mon_dte; 1055 extern struct mmu_rootptr mon_crp; 1056 int i; 1057 1058 mon_dte = (mmu_long_dte_t *)mon_crp.rp_addr; 1059 for (i = MMU_TIA(KERNBASE3X); i < MMU_TIA(KERN_END3X); i++) { 1060 mon_dte[i].attr.raw = kernAbase[i].attr.raw; 1061 mon_dte[i].addr.raw = kernAbase[i].addr.raw; 1062 } 1063 } 1064 1065 1066 /* pmap_init INTERFACE 1067 ** 1068 * Called at the end of vm_init() to set up the pmap system to go 1069 * into full time operation. All initialization of kernel_pmap 1070 * should be already done by now, so this should just do things 1071 * needed for user-level pmaps to work. 1072 */ 1073 void 1074 pmap_init(void) 1075 { 1076 1077 /** Initialize the manager pools **/ 1078 TAILQ_INIT(&a_pool); 1079 TAILQ_INIT(&b_pool); 1080 TAILQ_INIT(&c_pool); 1081 1082 /************************************************************** 1083 * Initialize all tmgr structures and MMU tables they manage. 
* 1084 **************************************************************/ 1085 /** Initialize A tables **/ 1086 pmap_init_a_tables(); 1087 /** Initialize B tables **/ 1088 pmap_init_b_tables(); 1089 /** Initialize C tables **/ 1090 pmap_init_c_tables(); 1091 1092 /** Initialize the pmap pools **/ 1093 pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl", 1094 &pool_allocator_nointr, IPL_NONE); 1095 } 1096 1097 /* pmap_init_a_tables() INTERNAL 1098 ** 1099 * Initializes all A managers, their MMU A tables, and inserts 1100 * them into the A manager pool for use by the system. 1101 */ 1102 void 1103 pmap_init_a_tables(void) 1104 { 1105 int i; 1106 a_tmgr_t *a_tbl; 1107 1108 for (i = 0; i < NUM_A_TABLES; i++) { 1109 /* Select the next available A manager from the pool */ 1110 a_tbl = &Atmgrbase[i]; 1111 1112 /* 1113 * Clear its parent entry. Set its wired and valid 1114 * entry count to zero. 1115 */ 1116 a_tbl->at_parent = NULL; 1117 a_tbl->at_wcnt = a_tbl->at_ecnt = 0; 1118 1119 /* Assign it the next available MMU A table from the pool */ 1120 a_tbl->at_dtbl = &mmuAbase[i * MMU_A_TBL_SIZE]; 1121 1122 /* 1123 * Initialize the MMU A table with the table in the `lwp0', 1124 * or kernel, mapping. This ensures that every process has 1125 * the kernel mapped in the top part of its address space. 1126 */ 1127 memcpy(a_tbl->at_dtbl, kernAbase, 1128 MMU_A_TBL_SIZE * sizeof(mmu_long_dte_t)); 1129 1130 /* 1131 * Finally, insert the manager into the A pool, 1132 * making it ready to be used by the system. 1133 */ 1134 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link); 1135 } 1136 } 1137 1138 /* pmap_init_b_tables() INTERNAL 1139 ** 1140 * Initializes all B table managers, their MMU B tables, and 1141 * inserts them into the B manager pool for use by the system. 1142 */ 1143 void 1144 pmap_init_b_tables(void) 1145 { 1146 int i, j; 1147 b_tmgr_t *b_tbl; 1148 1149 for (i = 0; i < NUM_B_TABLES; i++) { 1150 /* Select the next available B manager from the pool */ 1151 b_tbl = &Btmgrbase[i]; 1152 1153 b_tbl->bt_parent = NULL; /* clear its parent, */ 1154 b_tbl->bt_pidx = 0; /* parent index, */ 1155 b_tbl->bt_wcnt = 0; /* wired entry count, */ 1156 b_tbl->bt_ecnt = 0; /* valid entry count. */ 1157 1158 /* Assign it the next available MMU B table from the pool */ 1159 b_tbl->bt_dtbl = &mmuBbase[i * MMU_B_TBL_SIZE]; 1160 1161 /* Invalidate every descriptor in the table */ 1162 for (j = 0; j < MMU_B_TBL_SIZE; j++) 1163 b_tbl->bt_dtbl[j].attr.raw = MMU_DT_INVALID; 1164 1165 /* Insert the manager into the B pool */ 1166 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link); 1167 } 1168 } 1169 1170 /* pmap_init_c_tables() INTERNAL 1171 ** 1172 * Initializes all C table managers, their MMU C tables, and 1173 * inserts them into the C manager pool for use by the system. 
1174 */ 1175 void 1176 pmap_init_c_tables(void) 1177 { 1178 int i, j; 1179 c_tmgr_t *c_tbl; 1180 1181 for (i = 0; i < NUM_C_TABLES; i++) { 1182 /* Select the next available C manager from the pool */ 1183 c_tbl = &Ctmgrbase[i]; 1184 1185 c_tbl->ct_parent = NULL; /* clear its parent, */ 1186 c_tbl->ct_pidx = 0; /* parent index, */ 1187 c_tbl->ct_wcnt = 0; /* wired entry count, */ 1188 c_tbl->ct_ecnt = 0; /* valid entry count, */ 1189 c_tbl->ct_pmap = NULL; /* parent pmap, */ 1190 c_tbl->ct_va = 0; /* base of managed range */ 1191 1192 /* Assign it the next available MMU C table from the pool */ 1193 c_tbl->ct_dtbl = &mmuCbase[i * MMU_C_TBL_SIZE]; 1194 1195 for (j = 0; j < MMU_C_TBL_SIZE; j++) 1196 c_tbl->ct_dtbl[j].attr.raw = MMU_DT_INVALID; 1197 1198 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link); 1199 } 1200 } 1201 1202 /* pmap_init_pv() INTERNAL 1203 ** 1204 * Initializes the Physical to Virtual mapping system. 1205 */ 1206 void 1207 pmap_init_pv(void) 1208 { 1209 int i; 1210 1211 /* Initialize every PV head. */ 1212 for (i = 0; i < m68k_btop(total_phys_mem); i++) { 1213 pvbase[i].pv_idx = PVE_EOL; /* Indicate no mappings */ 1214 pvbase[i].pv_flags = 0; /* Zero out page flags */ 1215 } 1216 } 1217 1218 /* is_managed INTERNAL 1219 ** 1220 * Determine if the given physical address is managed by the PV system. 1221 * Note that this logic assumes that no one will ask for the status of 1222 * addresses which lie in-between the memory banks on the 3/80. If they 1223 * do so, it will falsely report that it is managed. 1224 * 1225 * Note: A "managed" address is one that was reported to the VM system as 1226 * a "usable page" during system startup. As such, the VM system expects the 1227 * pmap module to keep an accurate track of the useage of those pages. 1228 * Any page not given to the VM system at startup does not exist (as far as 1229 * the VM system is concerned) and is therefore "unmanaged." Examples are 1230 * those pages which belong to the ROM monitor and the memory allocated before 1231 * the VM system was started. 1232 */ 1233 static INLINE bool 1234 is_managed(paddr_t pa) 1235 { 1236 if (pa >= avail_start && pa < avail_end) 1237 return true; 1238 else 1239 return false; 1240 } 1241 1242 /* get_a_table INTERNAL 1243 ** 1244 * Retrieve and return a level A table for use in a user map. 1245 */ 1246 a_tmgr_t * 1247 get_a_table(void) 1248 { 1249 a_tmgr_t *tbl; 1250 pmap_t pmap; 1251 1252 /* Get the top A table in the pool */ 1253 tbl = TAILQ_FIRST(&a_pool); 1254 if (tbl == NULL) { 1255 /* 1256 * XXX - Instead of panicking here and in other get_x_table 1257 * functions, we do have the option of sleeping on the head of 1258 * the table pool. Any function which updates the table pool 1259 * would then issue a wakeup() on the head, thus waking up any 1260 * processes waiting for a table. 1261 * 1262 * Actually, the place to sleep would be when some process 1263 * asks for a "wired" mapping that would run us short of 1264 * mapping resources. This design DEPENDS on always having 1265 * some mapping resources in the pool for stealing, so we 1266 * must make sure we NEVER let the pool become empty. -gwr 1267 */ 1268 panic("get_a_table: out of A tables."); 1269 } 1270 1271 TAILQ_REMOVE(&a_pool, tbl, at_link); 1272 /* 1273 * If the table has a non-null parent pointer then it is in use. 1274 * Forcibly abduct it from its parent and clear its entries. 1275 * No re-entrancy worries here. This table would not be in the 1276 * table pool unless it was available for use. 
1277 * 1278 * Note that the second argument to free_a_table() is false. This 1279 * indicates that the table should not be relinked into the A table 1280 * pool. That is a job for the function that called us. 1281 */ 1282 if (tbl->at_parent) { 1283 KASSERT(tbl->at_wcnt == 0); 1284 pmap = tbl->at_parent; 1285 free_a_table(tbl, false); 1286 pmap->pm_a_tmgr = NULL; 1287 pmap->pm_a_phys = kernAphys; 1288 } 1289 return tbl; 1290 } 1291 1292 /* get_b_table INTERNAL 1293 ** 1294 * Return a level B table for use. 1295 */ 1296 b_tmgr_t * 1297 get_b_table(void) 1298 { 1299 b_tmgr_t *tbl; 1300 1301 /* See 'get_a_table' for comments. */ 1302 tbl = TAILQ_FIRST(&b_pool); 1303 if (tbl == NULL) 1304 panic("get_b_table: out of B tables."); 1305 TAILQ_REMOVE(&b_pool, tbl, bt_link); 1306 if (tbl->bt_parent) { 1307 KASSERT(tbl->bt_wcnt == 0); 1308 tbl->bt_parent->at_dtbl[tbl->bt_pidx].attr.raw = MMU_DT_INVALID; 1309 tbl->bt_parent->at_ecnt--; 1310 free_b_table(tbl, false); 1311 } 1312 return tbl; 1313 } 1314 1315 /* get_c_table INTERNAL 1316 ** 1317 * Return a level C table for use. 1318 */ 1319 c_tmgr_t * 1320 get_c_table(void) 1321 { 1322 c_tmgr_t *tbl; 1323 1324 /* See 'get_a_table' for comments */ 1325 tbl = TAILQ_FIRST(&c_pool); 1326 if (tbl == NULL) 1327 panic("get_c_table: out of C tables."); 1328 TAILQ_REMOVE(&c_pool, tbl, ct_link); 1329 if (tbl->ct_parent) { 1330 KASSERT(tbl->ct_wcnt == 0); 1331 tbl->ct_parent->bt_dtbl[tbl->ct_pidx].attr.raw = MMU_DT_INVALID; 1332 tbl->ct_parent->bt_ecnt--; 1333 free_c_table(tbl, false); 1334 } 1335 return tbl; 1336 } 1337 1338 /* 1339 * The following 'free_table' and 'steal_table' functions are called to 1340 * detach tables from their current obligations (parents and children) and 1341 * prepare them for reuse in another mapping. 1342 * 1343 * Free_table is used when the calling function will handle the fate 1344 * of the parent table, such as returning it to the free pool when it has 1345 * no valid entries. Functions that do not want to handle this should 1346 * call steal_table, in which the parent table's descriptors and entry 1347 * count are automatically modified when this table is removed. 1348 */ 1349 1350 /* free_a_table INTERNAL 1351 ** 1352 * Unmaps the given A table and all child tables from their current 1353 * mappings. Returns the number of pages that were invalidated. 1354 * If 'relink' is true, the function will return the table to the head 1355 * of the available table pool. 1356 * 1357 * Cache note: The MC68851 will automatically flush all 1358 * descriptors derived from a given A table from its 1359 * Automatic Translation Cache (ATC) if we issue a 1360 * 'PFLUSHR' instruction with the base address of the 1361 * table. This function should do, and does so. 1362 * Note note: We are using an MC68030 - there is no 1363 * PFLUSHR. 1364 */ 1365 int 1366 free_a_table(a_tmgr_t *a_tbl, bool relink) 1367 { 1368 int i, removed_cnt; 1369 mmu_long_dte_t *dte; 1370 mmu_short_dte_t *dtbl; 1371 b_tmgr_t *b_tbl; 1372 uint8_t at_wired, bt_wired; 1373 1374 /* 1375 * Flush the ATC cache of all cached descriptors derived 1376 * from this table. 1377 * Sun3x does not use 68851's cached table feature 1378 * flush_atc_crp(mmu_vtop(a_tbl->dte)); 1379 */ 1380 1381 /* 1382 * Remove any pending cache flushes that were designated 1383 * for the pmap this A table belongs to. 1384 * a_tbl->parent->atc_flushq[0] = 0; 1385 * Not implemented in sun3x. 1386 */ 1387 1388 /* 1389 * All A tables in the system should retain a map for the 1390 * kernel. 
If the table contains any valid descriptors 1391 * (other than those for the kernel area), invalidate them all, 1392 * stopping short of the kernel's entries. 1393 */ 1394 removed_cnt = 0; 1395 at_wired = a_tbl->at_wcnt; 1396 if (a_tbl->at_ecnt) { 1397 dte = a_tbl->at_dtbl; 1398 for (i = 0; i < MMU_TIA(KERNBASE3X); i++) { 1399 /* 1400 * If a table entry points to a valid B table, free 1401 * it and its children. 1402 */ 1403 if (MMU_VALID_DT(dte[i])) { 1404 /* 1405 * The following block does several things, 1406 * from innermost expression to the 1407 * outermost: 1408 * 1) It extracts the base (cc 1996) 1409 * address of the B table pointed 1410 * to in the A table entry dte[i]. 1411 * 2) It converts this base address into 1412 * the virtual address it can be 1413 * accessed with. (all MMU tables point 1414 * to physical addresses.) 1415 * 3) It finds the corresponding manager 1416 * structure which manages this MMU table. 1417 * 4) It frees the manager structure. 1418 * (This frees the MMU table and all 1419 * child tables. See 'free_b_table' for 1420 * details.) 1421 */ 1422 dtbl = mmu_ptov(dte[i].addr.raw); 1423 b_tbl = mmuB2tmgr(dtbl); 1424 bt_wired = b_tbl->bt_wcnt; 1425 removed_cnt += free_b_table(b_tbl, true); 1426 if (bt_wired) 1427 a_tbl->at_wcnt--; 1428 dte[i].attr.raw = MMU_DT_INVALID; 1429 } 1430 } 1431 a_tbl->at_ecnt = 0; 1432 } 1433 KASSERT(a_tbl->at_wcnt == 0); 1434 1435 if (relink) { 1436 a_tbl->at_parent = NULL; 1437 if (!at_wired) 1438 TAILQ_REMOVE(&a_pool, a_tbl, at_link); 1439 TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link); 1440 } 1441 return removed_cnt; 1442 } 1443 1444 /* free_b_table INTERNAL 1445 ** 1446 * Unmaps the given B table and all its children from their current 1447 * mappings. Returns the number of pages that were invalidated. 1448 * (For comments, see 'free_a_table()'). 1449 */ 1450 int 1451 free_b_table(b_tmgr_t *b_tbl, bool relink) 1452 { 1453 int i, removed_cnt; 1454 mmu_short_dte_t *dte; 1455 mmu_short_pte_t *dtbl; 1456 c_tmgr_t *c_tbl; 1457 uint8_t bt_wired, ct_wired; 1458 1459 removed_cnt = 0; 1460 bt_wired = b_tbl->bt_wcnt; 1461 if (b_tbl->bt_ecnt) { 1462 dte = b_tbl->bt_dtbl; 1463 for (i = 0; i < MMU_B_TBL_SIZE; i++) { 1464 if (MMU_VALID_DT(dte[i])) { 1465 dtbl = mmu_ptov(MMU_DTE_PA(dte[i])); 1466 c_tbl = mmuC2tmgr(dtbl); 1467 ct_wired = c_tbl->ct_wcnt; 1468 removed_cnt += free_c_table(c_tbl, true); 1469 if (ct_wired) 1470 b_tbl->bt_wcnt--; 1471 dte[i].attr.raw = MMU_DT_INVALID; 1472 } 1473 } 1474 b_tbl->bt_ecnt = 0; 1475 } 1476 KASSERT(b_tbl->bt_wcnt == 0); 1477 1478 if (relink) { 1479 b_tbl->bt_parent = NULL; 1480 if (!bt_wired) 1481 TAILQ_REMOVE(&b_pool, b_tbl, bt_link); 1482 TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link); 1483 } 1484 return removed_cnt; 1485 } 1486 1487 /* free_c_table INTERNAL 1488 ** 1489 * Unmaps the given C table from use and returns it to the pool for 1490 * re-use. Returns the number of pages that were invalidated. 1491 * 1492 * This function preserves any physical page modification information 1493 * contained in the page descriptors within the C table by calling 1494 * 'pmap_remove_pte().' 
1495 */ 1496 int 1497 free_c_table(c_tmgr_t *c_tbl, bool relink) 1498 { 1499 mmu_short_pte_t *c_pte; 1500 int i, removed_cnt; 1501 uint8_t ct_wired; 1502 1503 removed_cnt = 0; 1504 ct_wired = c_tbl->ct_wcnt; 1505 if (c_tbl->ct_ecnt) { 1506 for (i = 0; i < MMU_C_TBL_SIZE; i++) { 1507 c_pte = &c_tbl->ct_dtbl[i]; 1508 if (MMU_VALID_DT(*c_pte)) { 1509 if (c_pte->attr.raw & MMU_SHORT_PTE_WIRED) 1510 c_tbl->ct_wcnt--; 1511 pmap_remove_pte(c_pte); 1512 removed_cnt++; 1513 } 1514 } 1515 c_tbl->ct_ecnt = 0; 1516 } 1517 KASSERT(c_tbl->ct_wcnt == 0); 1518 1519 if (relink) { 1520 c_tbl->ct_parent = NULL; 1521 if (!ct_wired) 1522 TAILQ_REMOVE(&c_pool, c_tbl, ct_link); 1523 TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link); 1524 } 1525 return removed_cnt; 1526 } 1527 1528 1529 /* pmap_remove_pte INTERNAL 1530 ** 1531 * Unmap the given pte and preserve any page modification 1532 * information by transfering it to the pv head of the 1533 * physical page it maps to. This function does not update 1534 * any reference counts because it is assumed that the calling 1535 * function will do so. 1536 */ 1537 void 1538 pmap_remove_pte(mmu_short_pte_t *pte) 1539 { 1540 u_short pv_idx, targ_idx; 1541 paddr_t pa; 1542 pv_t *pv; 1543 1544 pa = MMU_PTE_PA(*pte); 1545 if (is_managed(pa)) { 1546 pv = pa2pv(pa); 1547 targ_idx = pteidx(pte); /* Index of PTE being removed */ 1548 1549 /* 1550 * If the PTE being removed is the first (or only) PTE in 1551 * the list of PTEs currently mapped to this page, remove the 1552 * PTE by changing the index found on the PV head. Otherwise 1553 * a linear search through the list will have to be executed 1554 * in order to find the PVE which points to the PTE being 1555 * removed, so that it may be modified to point to its new 1556 * neighbor. 1557 */ 1558 1559 pv_idx = pv->pv_idx; /* Index of first PTE in PV list */ 1560 if (pv_idx == targ_idx) { 1561 pv->pv_idx = pvebase[targ_idx].pve_next; 1562 } else { 1563 1564 /* 1565 * Find the PV element pointing to the target 1566 * element. Note: may have pv_idx==PVE_EOL 1567 */ 1568 1569 for (;;) { 1570 if (pv_idx == PVE_EOL) { 1571 goto pv_not_found; 1572 } 1573 if (pvebase[pv_idx].pve_next == targ_idx) 1574 break; 1575 pv_idx = pvebase[pv_idx].pve_next; 1576 } 1577 1578 /* 1579 * At this point, pv_idx is the index of the PV 1580 * element just before the target element in the list. 1581 * Unlink the target. 1582 */ 1583 1584 pvebase[pv_idx].pve_next = pvebase[targ_idx].pve_next; 1585 } 1586 1587 /* 1588 * Save the mod/ref bits of the pte by simply 1589 * ORing the entire pte onto the pv_flags member 1590 * of the pv structure. 1591 * There is no need to use a separate bit pattern 1592 * for usage information on the pv head than that 1593 * which is used on the MMU ptes. 1594 */ 1595 1596 pv_not_found: 1597 pv->pv_flags |= (u_short) pte->attr.raw; 1598 } 1599 pte->attr.raw = MMU_DT_INVALID; 1600 } 1601 1602 /* pmap_stroll INTERNAL 1603 ** 1604 * Retrieve the addresses of all table managers involved in the mapping of 1605 * the given virtual address. If the table walk completed successfully, 1606 * return true. If it was only partially successful, return false. 1607 * The table walk performed by this function is important to many other 1608 * functions in this module. 1609 * 1610 * Note: This function ought to be easier to read. 
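 *
 * A typical call (a sketch of how the functions below use it, not an
 * additional interface):
 *
 *	if (!pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &c_pte,
 *	    &a_idx, &b_idx, &pte_idx))
 *		return;		(no mapping exists at va)
 *	... examine or modify *c_pte, then update the tmgr counts ...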
1611 */ 1612 bool 1613 pmap_stroll(pmap_t pmap, vaddr_t va, a_tmgr_t **a_tbl, b_tmgr_t **b_tbl, 1614 c_tmgr_t **c_tbl, mmu_short_pte_t **pte, int *a_idx, int *b_idx, 1615 int *pte_idx) 1616 { 1617 mmu_long_dte_t *a_dte; /* A: long descriptor table */ 1618 mmu_short_dte_t *b_dte; /* B: short descriptor table */ 1619 1620 if (pmap == pmap_kernel()) 1621 return false; 1622 1623 /* Does the given pmap have its own A table? */ 1624 *a_tbl = pmap->pm_a_tmgr; 1625 if (*a_tbl == NULL) 1626 return false; /* No. Return unknown. */ 1627 /* Does the A table have a valid B table 1628 * under the corresponding table entry? 1629 */ 1630 *a_idx = MMU_TIA(va); 1631 a_dte = &((*a_tbl)->at_dtbl[*a_idx]); 1632 if (!MMU_VALID_DT(*a_dte)) 1633 return false; /* No. Return unknown. */ 1634 /* Yes. Extract B table from the A table. */ 1635 *b_tbl = mmuB2tmgr(mmu_ptov(a_dte->addr.raw)); 1636 /* 1637 * Does the B table have a valid C table 1638 * under the corresponding table entry? 1639 */ 1640 *b_idx = MMU_TIB(va); 1641 b_dte = &((*b_tbl)->bt_dtbl[*b_idx]); 1642 if (!MMU_VALID_DT(*b_dte)) 1643 return false; /* No. Return unknown. */ 1644 /* Yes. Extract C table from the B table. */ 1645 *c_tbl = mmuC2tmgr(mmu_ptov(MMU_DTE_PA(*b_dte))); 1646 *pte_idx = MMU_TIC(va); 1647 *pte = &((*c_tbl)->ct_dtbl[*pte_idx]); 1648 1649 return true; 1650 } 1651 1652 /* pmap_enter INTERFACE 1653 ** 1654 * Called by the kernel to map a virtual address 1655 * to a physical address in the given process map. 1656 * 1657 * Note: this function should apply an exclusive lock 1658 * on the pmap system for its duration. (it certainly 1659 * would save my hair!!) 1660 * This function ought to be easier to read. 1661 */ 1662 int 1663 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 1664 { 1665 bool insert, managed; /* Marks the need for PV insertion.*/ 1666 u_short nidx; /* PV list index */ 1667 int mapflags; /* Flags for the mapping (see NOTE1) */ 1668 u_int a_idx, b_idx, pte_idx; /* table indices */ 1669 a_tmgr_t *a_tbl; /* A: long descriptor table manager */ 1670 b_tmgr_t *b_tbl; /* B: short descriptor table manager */ 1671 c_tmgr_t *c_tbl; /* C: short page table manager */ 1672 mmu_long_dte_t *a_dte; /* A: long descriptor table */ 1673 mmu_short_dte_t *b_dte; /* B: short descriptor table */ 1674 mmu_short_pte_t *c_pte; /* C: short page descriptor table */ 1675 pv_t *pv; /* pv list head */ 1676 bool wired; /* is the mapping to be wired? */ 1677 enum {NONE, NEWA, NEWB, NEWC} llevel; /* used at end */ 1678 1679 if (pmap == pmap_kernel()) { 1680 pmap_enter_kernel(va, pa, prot); 1681 return 0; 1682 } 1683 1684 /* 1685 * Determine if the mapping should be wired. 1686 */ 1687 wired = ((flags & PMAP_WIRED) != 0); 1688 1689 /* 1690 * NOTE1: 1691 * 1692 * On November 13, 1999, someone changed the pmap_enter() API such 1693 * that it now accepts a 'flags' argument. This new argument 1694 * contains bit-flags for the architecture-independent (UVM) system to 1695 * use in signalling certain mapping requirements to the architecture- 1696 * dependent (pmap) system. The argument it replaces, 'wired', is now 1697 * one of the flags within it. 1698 * 1699 * In addition to flags signaled by the architecture-independent 1700 * system, parts of the architecture-dependent section of the sun3x 1701 * kernel pass their own flags in the lower, unused bits of the 1702 * physical address supplied to this function. These flags are 1703 * extracted and stored in the temporary variable 'mapflags'. 
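 *
 * (PMAP_NC, used earlier in this file when mapping the msgbuf page, is one
 * such flag: it travels in the low, non-page-frame bits of 'pa' and ends
 * up in 'mapflags' below, marking the mapping non-cacheable.)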
1704 * 1705 * Extract sun3x specific flags from the physical address. 1706 */ 1707 mapflags = (pa & ~MMU_PAGE_MASK); 1708 pa &= MMU_PAGE_MASK; 1709 1710 /* 1711 * Determine if the physical address being mapped is on-board RAM. 1712 * Any other area of the address space is likely to belong to a 1713 * device and hence it would be disasterous to cache its contents. 1714 */ 1715 if ((managed = is_managed(pa)) == false) 1716 mapflags |= PMAP_NC; 1717 1718 /* 1719 * For user mappings we walk along the MMU tables of the given 1720 * pmap, reaching a PTE which describes the virtual page being 1721 * mapped or changed. If any level of the walk ends in an invalid 1722 * entry, a table must be allocated and the entry must be updated 1723 * to point to it. 1724 * There is a bit of confusion as to whether this code must be 1725 * re-entrant. For now we will assume it is. To support 1726 * re-entrancy we must unlink tables from the table pool before 1727 * we assume we may use them. Tables are re-linked into the pool 1728 * when we are finished with them at the end of the function. 1729 * But I don't feel like doing that until we have proof that this 1730 * needs to be re-entrant. 1731 * 'llevel' records which tables need to be relinked. 1732 */ 1733 llevel = NONE; 1734 1735 /* 1736 * Step 1 - Retrieve the A table from the pmap. If it has no 1737 * A table, allocate a new one from the available pool. 1738 */ 1739 1740 a_tbl = pmap->pm_a_tmgr; 1741 if (a_tbl == NULL) { 1742 /* 1743 * This pmap does not currently have an A table. Allocate 1744 * a new one. 1745 */ 1746 a_tbl = get_a_table(); 1747 a_tbl->at_parent = pmap; 1748 1749 /* 1750 * Assign this new A table to the pmap, and calculate its 1751 * physical address so that loadcrp() can be used to make 1752 * the table active. 1753 */ 1754 pmap->pm_a_tmgr = a_tbl; 1755 pmap->pm_a_phys = mmu_vtop(a_tbl->at_dtbl); 1756 1757 /* 1758 * If the process receiving a new A table is the current 1759 * process, we are responsible for setting the MMU so that 1760 * it becomes the current address space. This only adds 1761 * new mappings, so no need to flush anything. 1762 */ 1763 if (pmap == current_pmap()) { 1764 kernel_crp.rp_addr = pmap->pm_a_phys; 1765 loadcrp(&kernel_crp); 1766 } 1767 1768 if (!wired) 1769 llevel = NEWA; 1770 } else { 1771 /* 1772 * Use the A table already allocated for this pmap. 1773 * Unlink it from the A table pool if necessary. 1774 */ 1775 if (wired && !a_tbl->at_wcnt) 1776 TAILQ_REMOVE(&a_pool, a_tbl, at_link); 1777 } 1778 1779 /* 1780 * Step 2 - Walk into the B table. If there is no valid B table, 1781 * allocate one. 1782 */ 1783 1784 a_idx = MMU_TIA(va); /* Calculate the TIA of the VA. */ 1785 a_dte = &a_tbl->at_dtbl[a_idx]; /* Retrieve descriptor from table */ 1786 if (MMU_VALID_DT(*a_dte)) { /* Is the descriptor valid? */ 1787 /* The descriptor is valid. Use the B table it points to. */ 1788 /************************************* 1789 * a_idx * 1790 * v * 1791 * a_tbl -> +-+-+-+-+-+-+-+-+-+-+-+- * 1792 * | | | | | | | | | | | | * 1793 * +-+-+-+-+-+-+-+-+-+-+-+- * 1794 * | * 1795 * \- b_tbl -> +-+- * 1796 * | | * 1797 * +-+- * 1798 *************************************/ 1799 b_dte = mmu_ptov(a_dte->addr.raw); 1800 b_tbl = mmuB2tmgr(b_dte); 1801 1802 /* 1803 * If the requested mapping must be wired, but this table 1804 * being used to map it is not, the table must be removed 1805 * from the available pool and its wired entry count 1806 * incremented. 
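 *
 * (Pool invariant, applied at every level below: a table is kept on
 * its a_pool/b_pool/c_pool queue only while its wired entry count is
 * zero.  Wiring the first entry beneath a table therefore pulls the
 * table off its queue and increments the wired count of its parent.)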
1807 */ 1808 if (wired && !b_tbl->bt_wcnt) { 1809 TAILQ_REMOVE(&b_pool, b_tbl, bt_link); 1810 a_tbl->at_wcnt++; 1811 } 1812 } else { 1813 /* The descriptor is invalid. Allocate a new B table. */ 1814 b_tbl = get_b_table(); 1815 1816 /* Point the parent A table descriptor to this new B table. */ 1817 a_dte->addr.raw = mmu_vtop(b_tbl->bt_dtbl); 1818 a_dte->attr.raw = MMU_LONG_DTE_LU | MMU_DT_SHORT; 1819 a_tbl->at_ecnt++; /* Update parent's valid entry count */ 1820 1821 /* Create the necessary back references to the parent table */ 1822 b_tbl->bt_parent = a_tbl; 1823 b_tbl->bt_pidx = a_idx; 1824 1825 /* 1826 * If this table is to be wired, make sure the parent A table 1827 * wired count is updated to reflect that it has another wired 1828 * entry. 1829 */ 1830 if (wired) 1831 a_tbl->at_wcnt++; 1832 else if (llevel == NONE) 1833 llevel = NEWB; 1834 } 1835 1836 /* 1837 * Step 3 - Walk into the C table, if there is no valid C table, 1838 * allocate one. 1839 */ 1840 1841 b_idx = MMU_TIB(va); /* Calculate the TIB of the VA */ 1842 b_dte = &b_tbl->bt_dtbl[b_idx]; /* Retrieve descriptor from table */ 1843 if (MMU_VALID_DT(*b_dte)) { /* Is the descriptor valid? */ 1844 /* The descriptor is valid. Use the C table it points to. */ 1845 /************************************** 1846 * c_idx * 1847 * | v * 1848 * \- b_tbl -> +-+-+-+-+-+-+-+-+-+-+- * 1849 * | | | | | | | | | | | * 1850 * +-+-+-+-+-+-+-+-+-+-+- * 1851 * | * 1852 * \- c_tbl -> +-+-- * 1853 * | | | * 1854 * +-+-- * 1855 **************************************/ 1856 c_pte = mmu_ptov(MMU_PTE_PA(*b_dte)); 1857 c_tbl = mmuC2tmgr(c_pte); 1858 1859 /* If mapping is wired and table is not */ 1860 if (wired && !c_tbl->ct_wcnt) { 1861 TAILQ_REMOVE(&c_pool, c_tbl, ct_link); 1862 b_tbl->bt_wcnt++; 1863 } 1864 } else { 1865 /* The descriptor is invalid. Allocate a new C table. */ 1866 c_tbl = get_c_table(); 1867 1868 /* Point the parent B table descriptor to this new C table. */ 1869 b_dte->attr.raw = mmu_vtop(c_tbl->ct_dtbl); 1870 b_dte->attr.raw |= MMU_DT_SHORT; 1871 b_tbl->bt_ecnt++; /* Update parent's valid entry count */ 1872 1873 /* Create the necessary back references to the parent table */ 1874 c_tbl->ct_parent = b_tbl; 1875 c_tbl->ct_pidx = b_idx; 1876 /* 1877 * Store the pmap and base virtual managed address for faster 1878 * retrieval in the PV functions. 1879 */ 1880 c_tbl->ct_pmap = pmap; 1881 c_tbl->ct_va = (va & (MMU_TIA_MASK|MMU_TIB_MASK)); 1882 1883 /* 1884 * If this table is to be wired, make sure the parent B table 1885 * wired count is updated to reflect that it has another wired 1886 * entry. 1887 */ 1888 if (wired) 1889 b_tbl->bt_wcnt++; 1890 else if (llevel == NONE) 1891 llevel = NEWC; 1892 } 1893 1894 /* 1895 * Step 4 - Deposit a page descriptor (PTE) into the appropriate 1896 * slot of the C table, describing the PA to which the VA is mapped. 1897 */ 1898 1899 pte_idx = MMU_TIC(va); 1900 c_pte = &c_tbl->ct_dtbl[pte_idx]; 1901 if (MMU_VALID_DT(*c_pte)) { /* Is the entry currently valid? */ 1902 /* 1903 * The PTE is currently valid. This particular call 1904 * is just a synonym for one (or more) of the following 1905 * operations: 1906 * change protection of a page 1907 * change wiring status of a page 1908 * remove the mapping of a page 1909 */ 1910 1911 /* First check if this is a wiring operation. */ 1912 if (c_pte->attr.raw & MMU_SHORT_PTE_WIRED) { 1913 /* 1914 * The existing mapping is wired, so adjust wired 1915 * entry count here. If new mapping is still wired, 1916 * wired entry count will be incremented again later. 
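 *
 * (So for a wired page that is simply being re-entered wired, e.g.
 * a protection change on a wired mapping, ct_wcnt is decremented
 * here and incremented again in the "if (wired)" block further
 * down, for a net change of zero.)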
1917 */ 1918 c_tbl->ct_wcnt--; 1919 if (!wired) { 1920 /* 1921 * The mapping of this PTE is being changed 1922 * from wired to unwired. 1923 * Adjust wired entry counts in each table and 1924 * set llevel flag to put unwired tables back 1925 * into the active pool. 1926 */ 1927 if (c_tbl->ct_wcnt == 0) { 1928 llevel = NEWC; 1929 if (--b_tbl->bt_wcnt == 0) { 1930 llevel = NEWB; 1931 if (--a_tbl->at_wcnt == 0) { 1932 llevel = NEWA; 1933 } 1934 } 1935 } 1936 } 1937 } 1938 1939 /* Is the new address the same as the old? */ 1940 if (MMU_PTE_PA(*c_pte) == pa) { 1941 /* 1942 * Yes, mark that it does not need to be reinserted 1943 * into the PV list. 1944 */ 1945 insert = false; 1946 1947 /* 1948 * Clear all but the modified, referenced and wired 1949 * bits on the PTE. 1950 */ 1951 c_pte->attr.raw &= (MMU_SHORT_PTE_M 1952 | MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED); 1953 } else { 1954 /* No, remove the old entry */ 1955 pmap_remove_pte(c_pte); 1956 insert = true; 1957 } 1958 1959 /* 1960 * TLB flush is only necessary if modifying current map. 1961 * However, in pmap_enter(), the pmap almost always IS 1962 * the current pmap, so don't even bother to check. 1963 */ 1964 TBIS(va); 1965 } else { 1966 /* 1967 * The PTE is invalid. Increment the valid entry count in 1968 * the C table manager to reflect the addition of a new entry. 1969 */ 1970 c_tbl->ct_ecnt++; 1971 1972 /* XXX - temporarily make sure the PTE is cleared. */ 1973 c_pte->attr.raw = 0; 1974 1975 /* It will also need to be inserted into the PV list. */ 1976 insert = true; 1977 } 1978 1979 /* 1980 * If page is changing from unwired to wired status, set an unused bit 1981 * within the PTE to indicate that it is wired. Also increment the 1982 * wired entry count in the C table manager. 1983 */ 1984 if (wired) { 1985 c_pte->attr.raw |= MMU_SHORT_PTE_WIRED; 1986 c_tbl->ct_wcnt++; 1987 } 1988 1989 /* 1990 * Map the page, being careful to preserve modify/reference/wired 1991 * bits. At this point it is assumed that the PTE either has no bits 1992 * set, or if there are set bits, they are only modified, reference or 1993 * wired bits. If not, the following statement will cause erratic 1994 * behavior. 1995 */ 1996 #ifdef PMAP_DEBUG 1997 if (c_pte->attr.raw & ~(MMU_SHORT_PTE_M | 1998 MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED)) { 1999 printf("pmap_enter: junk left in PTE at %p\n", c_pte); 2000 Debugger(); 2001 } 2002 #endif 2003 c_pte->attr.raw |= ((u_long) pa | MMU_DT_PAGE); 2004 2005 /* 2006 * If the mapping should be read-only, set the write protect 2007 * bit in the PTE. 2008 */ 2009 if (!(prot & VM_PROT_WRITE)) 2010 c_pte->attr.raw |= MMU_SHORT_PTE_WP; 2011 2012 /* 2013 * Mark the PTE as used and/or modified as specified by the flags arg. 2014 */ 2015 if (flags & VM_PROT_ALL) { 2016 c_pte->attr.raw |= MMU_SHORT_PTE_USED; 2017 if (flags & VM_PROT_WRITE) { 2018 c_pte->attr.raw |= MMU_SHORT_PTE_M; 2019 } 2020 } 2021 2022 /* 2023 * If the mapping should be cache inhibited (indicated by the flag 2024 * bits found on the lower order of the physical address.) 2025 * mark the PTE as a cache inhibited page. 2026 */ 2027 if (mapflags & PMAP_NC) 2028 c_pte->attr.raw |= MMU_SHORT_PTE_CI; 2029 2030 /* 2031 * If the physical address being mapped is managed by the PV 2032 * system then link the pte into the list of pages mapped to that 2033 * address. 
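 *
 * (The PV list is a chain of PTE indices rooted at the pv head,
 * roughly:
 *
 *	pv->pv_idx --> pvebase[i].pve_next --> ... --> PVE_EOL
 *
 * where each index names the slot in the global PTE array that
 * currently maps this physical page.  New mappings are pushed onto
 * the front of the chain, as done just below.)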
2034 */ 2035 if (insert && managed) { 2036 pv = pa2pv(pa); 2037 nidx = pteidx(c_pte); 2038 2039 pvebase[nidx].pve_next = pv->pv_idx; 2040 pv->pv_idx = nidx; 2041 } 2042 2043 /* Move any allocated or unwired tables back into the active pool. */ 2044 2045 switch (llevel) { 2046 case NEWA: 2047 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link); 2048 /* FALLTHROUGH */ 2049 case NEWB: 2050 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link); 2051 /* FALLTHROUGH */ 2052 case NEWC: 2053 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link); 2054 /* FALLTHROUGH */ 2055 default: 2056 break; 2057 } 2058 2059 return 0; 2060 } 2061 2062 /* pmap_enter_kernel INTERNAL 2063 ** 2064 * Map the given virtual address to the given physical address within the 2065 * kernel address space. This function exists because the kernel map does 2066 * not do dynamic table allocation. It consists of a contiguous array of ptes 2067 * and can be edited directly without the need to walk through any tables. 2068 * 2069 * XXX: "Danger, Will Robinson!" 2070 * Note that the kernel should never take a fault on any page 2071 * between [ KERNBASE .. virtual_avail ] and this is checked in 2072 * trap.c for kernel-mode MMU faults. This means that mappings 2073 * created in that range must be implicily wired. -gwr 2074 */ 2075 void 2076 pmap_enter_kernel(vaddr_t va, paddr_t pa, vm_prot_t prot) 2077 { 2078 bool was_valid, insert; 2079 u_short pte_idx; 2080 int flags; 2081 mmu_short_pte_t *pte; 2082 pv_t *pv; 2083 paddr_t old_pa; 2084 2085 flags = (pa & ~MMU_PAGE_MASK); 2086 pa &= MMU_PAGE_MASK; 2087 2088 if (is_managed(pa)) 2089 insert = true; 2090 else 2091 insert = false; 2092 2093 /* 2094 * Calculate the index of the PTE being modified. 2095 */ 2096 pte_idx = (u_long)m68k_btop(va - KERNBASE3X); 2097 2098 /* This array is traditionally named "Sysmap" */ 2099 pte = &kernCbase[pte_idx]; 2100 2101 if (MMU_VALID_DT(*pte)) { 2102 was_valid = true; 2103 /* 2104 * If the PTE already maps a different 2105 * physical address, umap and pv_unlink. 2106 */ 2107 old_pa = MMU_PTE_PA(*pte); 2108 if (pa != old_pa) 2109 pmap_remove_pte(pte); 2110 else { 2111 /* 2112 * Old PA and new PA are the same. No need to 2113 * relink the mapping within the PV list. 2114 */ 2115 insert = false; 2116 2117 /* 2118 * Save any mod/ref bits on the PTE. 2119 */ 2120 pte->attr.raw &= (MMU_SHORT_PTE_USED|MMU_SHORT_PTE_M); 2121 } 2122 } else { 2123 pte->attr.raw = MMU_DT_INVALID; 2124 was_valid = false; 2125 } 2126 2127 /* 2128 * Map the page. Being careful to preserve modified/referenced bits 2129 * on the PTE. 2130 */ 2131 pte->attr.raw |= (pa | MMU_DT_PAGE); 2132 2133 if (!(prot & VM_PROT_WRITE)) /* If access should be read-only */ 2134 pte->attr.raw |= MMU_SHORT_PTE_WP; 2135 if (flags & PMAP_NC) 2136 pte->attr.raw |= MMU_SHORT_PTE_CI; 2137 if (was_valid) 2138 TBIS(va); 2139 2140 /* 2141 * Insert the PTE into the PV system, if need be. 
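 *
 * (Because the kernel page tables form one contiguous PTE array,
 * the 'pte_idx' computed above can be used directly as the PV
 * element index; no translation through pteidx() is needed here.)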
2142 */ 2143 if (insert) { 2144 pv = pa2pv(pa); 2145 pvebase[pte_idx].pve_next = pv->pv_idx; 2146 pv->pv_idx = pte_idx; 2147 } 2148 } 2149 2150 void 2151 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 2152 { 2153 mmu_short_pte_t *pte; 2154 2155 /* This array is traditionally named "Sysmap" */ 2156 pte = &kernCbase[(u_long)m68k_btop(va - KERNBASE3X)]; 2157 2158 KASSERT(!MMU_VALID_DT(*pte)); 2159 pte->attr.raw = MMU_DT_INVALID | MMU_DT_PAGE | (pa & MMU_PAGE_MASK); 2160 if (!(prot & VM_PROT_WRITE)) 2161 pte->attr.raw |= MMU_SHORT_PTE_WP; 2162 } 2163 2164 void 2165 pmap_kremove(vaddr_t va, vsize_t len) 2166 { 2167 int idx, eidx; 2168 2169 #ifdef PMAP_DEBUG 2170 if ((va & PGOFSET) || (len & PGOFSET)) 2171 panic("pmap_kremove: alignment"); 2172 #endif 2173 2174 idx = m68k_btop(va - KERNBASE3X); 2175 eidx = m68k_btop(va + len - KERNBASE3X); 2176 2177 while (idx < eidx) { 2178 kernCbase[idx++].attr.raw = MMU_DT_INVALID; 2179 TBIS(va); 2180 va += PAGE_SIZE; 2181 } 2182 } 2183 2184 /* pmap_map INTERNAL 2185 ** 2186 * Map a contiguous range of physical memory into a contiguous range of 2187 * the kernel virtual address space. 2188 * 2189 * Used for device mappings and early mapping of the kernel text/data/bss. 2190 * Returns the first virtual address beyond the end of the range. 2191 */ 2192 vaddr_t 2193 pmap_map(vaddr_t va, paddr_t pa, paddr_t endpa, int prot) 2194 { 2195 int sz; 2196 2197 sz = endpa - pa; 2198 do { 2199 pmap_enter_kernel(va, pa, prot); 2200 va += PAGE_SIZE; 2201 pa += PAGE_SIZE; 2202 sz -= PAGE_SIZE; 2203 } while (sz > 0); 2204 pmap_update(pmap_kernel()); 2205 return va; 2206 } 2207 2208 /* pmap_protect_kernel INTERNAL 2209 ** 2210 * Apply the given protection code to a kernel address range. 2211 */ 2212 static INLINE void 2213 pmap_protect_kernel(vaddr_t startva, vaddr_t endva, vm_prot_t prot) 2214 { 2215 vaddr_t va; 2216 mmu_short_pte_t *pte; 2217 2218 pte = &kernCbase[(unsigned long) m68k_btop(startva - KERNBASE3X)]; 2219 for (va = startva; va < endva; va += PAGE_SIZE, pte++) { 2220 if (MMU_VALID_DT(*pte)) { 2221 switch (prot) { 2222 case VM_PROT_ALL: 2223 break; 2224 case VM_PROT_EXECUTE: 2225 case VM_PROT_READ: 2226 case VM_PROT_READ|VM_PROT_EXECUTE: 2227 pte->attr.raw |= MMU_SHORT_PTE_WP; 2228 break; 2229 case VM_PROT_NONE: 2230 /* this is an alias for 'pmap_remove_kernel' */ 2231 pmap_remove_pte(pte); 2232 break; 2233 default: 2234 break; 2235 } 2236 /* 2237 * since this is the kernel, immediately flush any cached 2238 * descriptors for this address. 2239 */ 2240 TBIS(va); 2241 } 2242 } 2243 } 2244 2245 /* pmap_protect INTERFACE 2246 ** 2247 * Apply the given protection to the given virtual address range within 2248 * the given map. 2249 * 2250 * It is ok for the protection applied to be stronger than what is 2251 * specified. We use this to our advantage when the given map has no 2252 * mapping for the virtual address. By skipping a page when this 2253 * is discovered, we are effectively applying a protection of VM_PROT_NONE, 2254 * and therefore do not need to map the page just to apply a protection 2255 * code. Only pmap_enter() needs to create new mappings if they do not exist. 2256 * 2257 * XXX - This function could be speeded up by using pmap_stroll() for inital 2258 * setup, and then manual scrolling in the for() loop. 
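 *
 * Typical use (illustrative only): downgrading a user range to
 * read-only,
 *
 *	pmap_protect(pmap, va, va + len, VM_PROT_READ);
 *
 * which write-protects every valid PTE in the range and, if 'pmap'
 * is the current one, flushes the affected ATC entries.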
2259 */ 2260 void 2261 pmap_protect(pmap_t pmap, vaddr_t startva, vaddr_t endva, vm_prot_t prot) 2262 { 2263 bool iscurpmap; 2264 int a_idx, b_idx, c_idx; 2265 a_tmgr_t *a_tbl; 2266 b_tmgr_t *b_tbl; 2267 c_tmgr_t *c_tbl; 2268 mmu_short_pte_t *pte; 2269 2270 if (pmap == pmap_kernel()) { 2271 pmap_protect_kernel(startva, endva, prot); 2272 return; 2273 } 2274 2275 /* 2276 * In this particular pmap implementation, there are only three 2277 * types of memory protection: 'all' (read/write/execute), 2278 * 'read-only' (read/execute) and 'none' (no mapping.) 2279 * It is not possible for us to treat 'executable' as a separate 2280 * protection type. Therefore, protection requests that seek to 2281 * remove execute permission while retaining read or write, and those 2282 * that make little sense (write-only for example) are ignored. 2283 */ 2284 switch (prot) { 2285 case VM_PROT_NONE: 2286 /* 2287 * A request to apply the protection code of 2288 * 'VM_PROT_NONE' is a synonym for pmap_remove(). 2289 */ 2290 pmap_remove(pmap, startva, endva); 2291 return; 2292 case VM_PROT_EXECUTE: 2293 case VM_PROT_READ: 2294 case VM_PROT_READ|VM_PROT_EXECUTE: 2295 /* continue */ 2296 break; 2297 case VM_PROT_WRITE: 2298 case VM_PROT_WRITE|VM_PROT_READ: 2299 case VM_PROT_WRITE|VM_PROT_EXECUTE: 2300 case VM_PROT_ALL: 2301 /* None of these should happen in a sane system. */ 2302 return; 2303 } 2304 2305 /* 2306 * If the pmap has no A table, it has no mappings and therefore 2307 * there is nothing to protect. 2308 */ 2309 if ((a_tbl = pmap->pm_a_tmgr) == NULL) 2310 return; 2311 2312 a_idx = MMU_TIA(startva); 2313 b_idx = MMU_TIB(startva); 2314 c_idx = MMU_TIC(startva); 2315 b_tbl = NULL; 2316 c_tbl = NULL; 2317 2318 iscurpmap = (pmap == current_pmap()); 2319 while (startva < endva) { 2320 if (b_tbl || MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) { 2321 if (b_tbl == NULL) { 2322 b_tbl = (b_tmgr_t *) a_tbl->at_dtbl[a_idx].addr.raw; 2323 b_tbl = mmu_ptov((vaddr_t)b_tbl); 2324 b_tbl = mmuB2tmgr((mmu_short_dte_t *)b_tbl); 2325 } 2326 if (c_tbl || MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) { 2327 if (c_tbl == NULL) { 2328 c_tbl = (c_tmgr_t *) MMU_DTE_PA(b_tbl->bt_dtbl[b_idx]); 2329 c_tbl = mmu_ptov((vaddr_t)c_tbl); 2330 c_tbl = mmuC2tmgr((mmu_short_pte_t *)c_tbl); 2331 } 2332 if (MMU_VALID_DT(c_tbl->ct_dtbl[c_idx])) { 2333 pte = &c_tbl->ct_dtbl[c_idx]; 2334 /* make the mapping read-only */ 2335 pte->attr.raw |= MMU_SHORT_PTE_WP; 2336 /* 2337 * If we just modified the current address space, 2338 * flush any translations for the modified page from 2339 * the translation cache and any data from it in the 2340 * data cache. 2341 */ 2342 if (iscurpmap) 2343 TBIS(startva); 2344 } 2345 startva += PAGE_SIZE; 2346 2347 if (++c_idx >= MMU_C_TBL_SIZE) { /* exceeded C table? */ 2348 c_tbl = NULL; 2349 c_idx = 0; 2350 if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */ 2351 b_tbl = NULL; 2352 b_idx = 0; 2353 } 2354 } 2355 } else { /* C table wasn't valid */ 2356 c_tbl = NULL; 2357 c_idx = 0; 2358 startva += MMU_TIB_RANGE; 2359 if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */ 2360 b_tbl = NULL; 2361 b_idx = 0; 2362 } 2363 } /* C table */ 2364 } else { /* B table wasn't valid */ 2365 b_tbl = NULL; 2366 b_idx = 0; 2367 startva += MMU_TIA_RANGE; 2368 a_idx++; 2369 } /* B table */ 2370 } 2371 } 2372 2373 /* pmap_unwire INTERFACE 2374 ** 2375 * Clear the wired attribute of the specified page. 2376 * 2377 * This function is called from vm_fault.c to unwire 2378 * a mapping. 
2379 */ 2380 void 2381 pmap_unwire(pmap_t pmap, vaddr_t va) 2382 { 2383 int a_idx, b_idx, c_idx; 2384 a_tmgr_t *a_tbl; 2385 b_tmgr_t *b_tbl; 2386 c_tmgr_t *c_tbl; 2387 mmu_short_pte_t *pte; 2388 2389 /* Kernel mappings always remain wired. */ 2390 if (pmap == pmap_kernel()) 2391 return; 2392 2393 /* 2394 * Walk through the tables. If the walk terminates without 2395 * a valid PTE then the address wasn't wired in the first place. 2396 * Return immediately. 2397 */ 2398 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte, &a_idx, 2399 &b_idx, &c_idx) == false) 2400 return; 2401 2402 2403 /* Is the PTE wired? If not, return. */ 2404 if (!(pte->attr.raw & MMU_SHORT_PTE_WIRED)) 2405 return; 2406 2407 /* Remove the wiring bit. */ 2408 pte->attr.raw &= ~(MMU_SHORT_PTE_WIRED); 2409 2410 /* 2411 * Decrement the wired entry count in the C table. 2412 * If it reaches zero the following things happen: 2413 * 1. The table no longer has any wired entries and is considered 2414 * unwired. 2415 * 2. It is placed on the available queue. 2416 * 3. The parent table's wired entry count is decremented. 2417 * 4. If it reaches zero, this process repeats at step 1 and 2418 * stops at after reaching the A table. 2419 */ 2420 if (--c_tbl->ct_wcnt == 0) { 2421 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link); 2422 if (--b_tbl->bt_wcnt == 0) { 2423 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link); 2424 if (--a_tbl->at_wcnt == 0) { 2425 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link); 2426 } 2427 } 2428 } 2429 } 2430 2431 /* pmap_copy INTERFACE 2432 ** 2433 * Copy the mappings of a range of addresses in one pmap, into 2434 * the destination address of another. 2435 * 2436 * This routine is advisory. Should we one day decide that MMU tables 2437 * may be shared by more than one pmap, this function should be used to 2438 * link them together. Until that day however, we do nothing. 2439 */ 2440 void 2441 pmap_copy(pmap_t pmap_a, pmap_t pmap_b, vaddr_t dst, vsize_t len, vaddr_t src) 2442 { 2443 2444 /* not implemented. */ 2445 } 2446 2447 /* pmap_copy_page INTERFACE 2448 ** 2449 * Copy the contents of one physical page into another. 2450 * 2451 * This function makes use of two virtual pages allocated in pmap_bootstrap() 2452 * to map the two specified physical pages into the kernel address space. 2453 * 2454 * Note: We could use the transparent translation registers to make the 2455 * mappings. If we do so, be sure to disable interrupts before using them. 2456 */ 2457 void 2458 pmap_copy_page(paddr_t srcpa, paddr_t dstpa) 2459 { 2460 vaddr_t srcva, dstva; 2461 int s; 2462 2463 srcva = tmp_vpages[0]; 2464 dstva = tmp_vpages[1]; 2465 2466 s = splvm(); 2467 #ifdef DIAGNOSTIC 2468 if (tmp_vpages_inuse++) 2469 panic("pmap_copy_page: temporary vpages are in use."); 2470 #endif 2471 2472 /* Map pages as non-cacheable to avoid cache polution? */ 2473 pmap_kenter_pa(srcva, srcpa, VM_PROT_READ, 0); 2474 pmap_kenter_pa(dstva, dstpa, VM_PROT_READ | VM_PROT_WRITE, 0); 2475 2476 /* Hand-optimized version of memcpy(dst, src, PAGE_SIZE) */ 2477 copypage((char *)srcva, (char *)dstva); 2478 2479 pmap_kremove(srcva, PAGE_SIZE); 2480 pmap_kremove(dstva, PAGE_SIZE); 2481 2482 #ifdef DIAGNOSTIC 2483 --tmp_vpages_inuse; 2484 #endif 2485 splx(s); 2486 } 2487 2488 /* pmap_zero_page INTERFACE 2489 ** 2490 * Zero the contents of the specified physical page. 2491 * 2492 * Uses one of the virtual pages allocated in pmap_boostrap() 2493 * to map the specified page into the kernel address space. 
2494 */ 2495 void 2496 pmap_zero_page(paddr_t dstpa) 2497 { 2498 vaddr_t dstva; 2499 int s; 2500 2501 dstva = tmp_vpages[1]; 2502 s = splvm(); 2503 #ifdef DIAGNOSTIC 2504 if (tmp_vpages_inuse++) 2505 panic("pmap_zero_page: temporary vpages are in use."); 2506 #endif 2507 2508 /* The comments in pmap_copy_page() above apply here also. */ 2509 pmap_kenter_pa(dstva, dstpa, VM_PROT_READ | VM_PROT_WRITE, 0); 2510 2511 /* Hand-optimized version of memset(ptr, 0, PAGE_SIZE) */ 2512 zeropage((char *)dstva); 2513 2514 pmap_kremove(dstva, PAGE_SIZE); 2515 #ifdef DIAGNOSTIC 2516 --tmp_vpages_inuse; 2517 #endif 2518 splx(s); 2519 } 2520 2521 /* pmap_pinit INTERNAL 2522 ** 2523 * Initialize a pmap structure. 2524 */ 2525 static INLINE void 2526 pmap_pinit(pmap_t pmap) 2527 { 2528 2529 memset(pmap, 0, sizeof(struct pmap)); 2530 pmap->pm_a_tmgr = NULL; 2531 pmap->pm_a_phys = kernAphys; 2532 pmap->pm_refcount = 1; 2533 simple_lock_init(&pmap->pm_lock); 2534 } 2535 2536 /* pmap_create INTERFACE 2537 ** 2538 * Create and return a pmap structure. 2539 */ 2540 pmap_t 2541 pmap_create(void) 2542 { 2543 pmap_t pmap; 2544 2545 pmap = pool_get(&pmap_pmap_pool, PR_WAITOK); 2546 pmap_pinit(pmap); 2547 return pmap; 2548 } 2549 2550 /* pmap_release INTERNAL 2551 ** 2552 * Release any resources held by the given pmap. 2553 * 2554 * This is the reverse analog to pmap_pinit. It does not 2555 * necessarily mean for the pmap structure to be deallocated, 2556 * as in pmap_destroy. 2557 */ 2558 static INLINE void 2559 pmap_release(pmap_t pmap) 2560 { 2561 2562 /* 2563 * As long as the pmap contains no mappings, 2564 * which always should be the case whenever 2565 * this function is called, there really should 2566 * be nothing to do. 2567 */ 2568 #ifdef PMAP_DEBUG 2569 if (pmap == pmap_kernel()) 2570 panic("pmap_release: kernel pmap"); 2571 #endif 2572 /* 2573 * XXX - If this pmap has an A table, give it back. 2574 * The pmap SHOULD be empty by now, and pmap_remove 2575 * should have already given back the A table... 2576 * However, I see: pmap->pm_a_tmgr->at_ecnt == 1 2577 * at this point, which means some mapping was not 2578 * removed when it should have been. -gwr 2579 */ 2580 if (pmap->pm_a_tmgr != NULL) { 2581 /* First make sure we are not using it! */ 2582 if (kernel_crp.rp_addr == pmap->pm_a_phys) { 2583 kernel_crp.rp_addr = kernAphys; 2584 loadcrp(&kernel_crp); 2585 } 2586 #ifdef PMAP_DEBUG /* XXX - todo! */ 2587 /* XXX - Now complain... */ 2588 printf("pmap_release: still have table\n"); 2589 Debugger(); 2590 #endif 2591 free_a_table(pmap->pm_a_tmgr, true); 2592 pmap->pm_a_tmgr = NULL; 2593 pmap->pm_a_phys = kernAphys; 2594 } 2595 } 2596 2597 /* pmap_reference INTERFACE 2598 ** 2599 * Increment the reference count of a pmap. 2600 */ 2601 void 2602 pmap_reference(pmap_t pmap) 2603 { 2604 pmap_lock(pmap); 2605 pmap_add_ref(pmap); 2606 pmap_unlock(pmap); 2607 } 2608 2609 /* pmap_dereference INTERNAL 2610 ** 2611 * Decrease the reference count on the given pmap 2612 * by one and return the current count. 2613 */ 2614 static INLINE int 2615 pmap_dereference(pmap_t pmap) 2616 { 2617 int rtn; 2618 2619 pmap_lock(pmap); 2620 rtn = pmap_del_ref(pmap); 2621 pmap_unlock(pmap); 2622 2623 return rtn; 2624 } 2625 2626 /* pmap_destroy INTERFACE 2627 ** 2628 * Decrement a pmap's reference count and delete 2629 * the pmap if it becomes zero. Will be called 2630 * only after all mappings have been removed. 
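 *
 * Reference counting sketch (illustrative):
 *
 *	pmap = pmap_create();	pm_refcount == 1
 *	pmap_reference(pmap);	pm_refcount == 2
 *	pmap_destroy(pmap);	pm_refcount == 1, pmap kept
 *	pmap_destroy(pmap);	pm_refcount == 0, pmap released and
 *				returned to pmap_pmap_pool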
2631 */ 2632 void 2633 pmap_destroy(pmap_t pmap) 2634 { 2635 2636 if (pmap_dereference(pmap) == 0) { 2637 pmap_release(pmap); 2638 pool_put(&pmap_pmap_pool, pmap); 2639 } 2640 } 2641 2642 /* pmap_is_referenced INTERFACE 2643 ** 2644 * Determine if the given physical page has been 2645 * referenced (read from [or written to.]) 2646 */ 2647 bool 2648 pmap_is_referenced(struct vm_page *pg) 2649 { 2650 paddr_t pa = VM_PAGE_TO_PHYS(pg); 2651 pv_t *pv; 2652 int idx; 2653 2654 /* 2655 * Check the flags on the pv head. If they are set, 2656 * return immediately. Otherwise a search must be done. 2657 */ 2658 2659 pv = pa2pv(pa); 2660 if (pv->pv_flags & PV_FLAGS_USED) 2661 return true; 2662 2663 /* 2664 * Search through all pv elements pointing 2665 * to this page and query their reference bits 2666 */ 2667 2668 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) { 2669 if (MMU_PTE_USED(kernCbase[idx])) { 2670 return true; 2671 } 2672 } 2673 return false; 2674 } 2675 2676 /* pmap_is_modified INTERFACE 2677 ** 2678 * Determine if the given physical page has been 2679 * modified (written to.) 2680 */ 2681 bool 2682 pmap_is_modified(struct vm_page *pg) 2683 { 2684 paddr_t pa = VM_PAGE_TO_PHYS(pg); 2685 pv_t *pv; 2686 int idx; 2687 2688 /* see comments in pmap_is_referenced() */ 2689 pv = pa2pv(pa); 2690 if (pv->pv_flags & PV_FLAGS_MDFY) 2691 return true; 2692 2693 for (idx = pv->pv_idx; 2694 idx != PVE_EOL; 2695 idx = pvebase[idx].pve_next) { 2696 2697 if (MMU_PTE_MODIFIED(kernCbase[idx])) { 2698 return true; 2699 } 2700 } 2701 2702 return false; 2703 } 2704 2705 /* pmap_page_protect INTERFACE 2706 ** 2707 * Applies the given protection to all mappings to the given 2708 * physical page. 2709 */ 2710 void 2711 pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 2712 { 2713 paddr_t pa = VM_PAGE_TO_PHYS(pg); 2714 pv_t *pv; 2715 int idx; 2716 vaddr_t va; 2717 struct mmu_short_pte_struct *pte; 2718 c_tmgr_t *c_tbl; 2719 pmap_t pmap, curpmap; 2720 2721 curpmap = current_pmap(); 2722 pv = pa2pv(pa); 2723 2724 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) { 2725 pte = &kernCbase[idx]; 2726 switch (prot) { 2727 case VM_PROT_ALL: 2728 /* do nothing */ 2729 break; 2730 case VM_PROT_EXECUTE: 2731 case VM_PROT_READ: 2732 case VM_PROT_READ|VM_PROT_EXECUTE: 2733 /* 2734 * Determine the virtual address mapped by 2735 * the PTE and flush ATC entries if necessary. 2736 */ 2737 va = pmap_get_pteinfo(idx, &pmap, &c_tbl); 2738 pte->attr.raw |= MMU_SHORT_PTE_WP; 2739 if (pmap == curpmap || pmap == pmap_kernel()) 2740 TBIS(va); 2741 break; 2742 case VM_PROT_NONE: 2743 /* Save the mod/ref bits. */ 2744 pv->pv_flags |= pte->attr.raw; 2745 /* Invalidate the PTE. */ 2746 pte->attr.raw = MMU_DT_INVALID; 2747 2748 /* 2749 * Update table counts. And flush ATC entries 2750 * if necessary. 2751 */ 2752 va = pmap_get_pteinfo(idx, &pmap, &c_tbl); 2753 2754 /* 2755 * If the PTE belongs to the kernel map, 2756 * be sure to flush the page it maps. 2757 */ 2758 if (pmap == pmap_kernel()) { 2759 TBIS(va); 2760 } else { 2761 /* 2762 * The PTE belongs to a user map. 2763 * update the entry count in the C 2764 * table to which it belongs and flush 2765 * the ATC if the mapping belongs to 2766 * the current pmap. 2767 */ 2768 c_tbl->ct_ecnt--; 2769 if (pmap == curpmap) 2770 TBIS(va); 2771 } 2772 break; 2773 default: 2774 break; 2775 } 2776 } 2777 2778 /* 2779 * If the protection code indicates that all mappings to the page 2780 * be removed, truncate the PV list to zero entries. 
2781 */ 2782 if (prot == VM_PROT_NONE) 2783 pv->pv_idx = PVE_EOL; 2784 } 2785 2786 /* pmap_get_pteinfo INTERNAL 2787 ** 2788 * Called internally to find the pmap and virtual address within that 2789 * map to which the pte at the given index maps. Also includes the PTE's C 2790 * table manager. 2791 * 2792 * Returns the pmap in the argument provided, and the virtual address 2793 * by return value. 2794 */ 2795 vaddr_t 2796 pmap_get_pteinfo(u_int idx, pmap_t *pmap, c_tmgr_t **tbl) 2797 { 2798 vaddr_t va = 0; 2799 2800 /* 2801 * Determine if the PTE is a kernel PTE or a user PTE. 2802 */ 2803 if (idx >= NUM_KERN_PTES) { 2804 /* 2805 * The PTE belongs to a user mapping. 2806 */ 2807 /* XXX: Would like an inline for this to validate idx... */ 2808 *tbl = &Ctmgrbase[(idx - NUM_KERN_PTES) / MMU_C_TBL_SIZE]; 2809 2810 *pmap = (*tbl)->ct_pmap; 2811 /* 2812 * To find the va to which the PTE maps, we first take 2813 * the table's base virtual address mapping which is stored 2814 * in ct_va. We then increment this address by a page for 2815 * every slot skipped until we reach the PTE. 2816 */ 2817 va = (*tbl)->ct_va; 2818 va += m68k_ptob(idx % MMU_C_TBL_SIZE); 2819 } else { 2820 /* 2821 * The PTE belongs to the kernel map. 2822 */ 2823 *pmap = pmap_kernel(); 2824 2825 va = m68k_ptob(idx); 2826 va += KERNBASE3X; 2827 } 2828 2829 return va; 2830 } 2831 2832 /* pmap_clear_modify INTERFACE 2833 ** 2834 * Clear the modification bit on the page at the specified 2835 * physical address. 2836 * 2837 */ 2838 bool 2839 pmap_clear_modify(struct vm_page *pg) 2840 { 2841 paddr_t pa = VM_PAGE_TO_PHYS(pg); 2842 bool rv; 2843 2844 rv = pmap_is_modified(pg); 2845 pmap_clear_pv(pa, PV_FLAGS_MDFY); 2846 return rv; 2847 } 2848 2849 /* pmap_clear_reference INTERFACE 2850 ** 2851 * Clear the referenced bit on the page at the specified 2852 * physical address. 2853 */ 2854 bool 2855 pmap_clear_reference(struct vm_page *pg) 2856 { 2857 paddr_t pa = VM_PAGE_TO_PHYS(pg); 2858 bool rv; 2859 2860 rv = pmap_is_referenced(pg); 2861 pmap_clear_pv(pa, PV_FLAGS_USED); 2862 return rv; 2863 } 2864 2865 /* pmap_clear_pv INTERNAL 2866 ** 2867 * Clears the specified flag from the specified physical address. 2868 * (Used by pmap_clear_modify() and pmap_clear_reference().) 2869 * 2870 * Flag is one of: 2871 * PV_FLAGS_MDFY - Page modified bit. 2872 * PV_FLAGS_USED - Page used (referenced) bit. 2873 * 2874 * This routine must not only clear the flag on the pv list 2875 * head. It must also clear the bit on every pte in the pv 2876 * list associated with the address. 2877 */ 2878 void 2879 pmap_clear_pv(paddr_t pa, int flag) 2880 { 2881 pv_t *pv; 2882 int idx; 2883 vaddr_t va; 2884 pmap_t pmap; 2885 mmu_short_pte_t *pte; 2886 c_tmgr_t *c_tbl; 2887 2888 pv = pa2pv(pa); 2889 pv->pv_flags &= ~(flag); 2890 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) { 2891 pte = &kernCbase[idx]; 2892 pte->attr.raw &= ~(flag); 2893 2894 /* 2895 * The MC68030 MMU will not set the modified or 2896 * referenced bits on any MMU tables for which it has 2897 * a cached descriptor with its modify bit set. To insure 2898 * that it will modify these bits on the PTE during the next 2899 * time it is written to or read from, we must flush it from 2900 * the ATC. 2901 * 2902 * Ordinarily it is only necessary to flush the descriptor 2903 * if it is used in the current address space. 
But since I 2904 * am not sure that there will always be a notion of 2905 * 'the current address space' when this function is called, 2906 * I will skip the test and always flush the address. It 2907 * does no harm. 2908 */ 2909 2910 va = pmap_get_pteinfo(idx, &pmap, &c_tbl); 2911 TBIS(va); 2912 } 2913 } 2914 2915 /* pmap_extract_kernel INTERNAL 2916 ** 2917 * Extract a translation from the kernel address space. 2918 */ 2919 static INLINE bool 2920 pmap_extract_kernel(vaddr_t va, paddr_t *pap) 2921 { 2922 mmu_short_pte_t *pte; 2923 2924 pte = &kernCbase[(u_int)m68k_btop(va - KERNBASE3X)]; 2925 if (!MMU_VALID_DT(*pte)) 2926 return false; 2927 if (pap != NULL) 2928 *pap = MMU_PTE_PA(*pte); 2929 return true; 2930 } 2931 2932 /* pmap_extract INTERFACE 2933 ** 2934 * Return the physical address mapped by the virtual address 2935 * in the specified pmap. 2936 * 2937 * Note: this function should also apply an exclusive lock 2938 * on the pmap system during its duration. 2939 */ 2940 bool 2941 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap) 2942 { 2943 int a_idx, b_idx, pte_idx; 2944 a_tmgr_t *a_tbl; 2945 b_tmgr_t *b_tbl; 2946 c_tmgr_t *c_tbl; 2947 mmu_short_pte_t *c_pte; 2948 2949 if (pmap == pmap_kernel()) 2950 return pmap_extract_kernel(va, pap); 2951 2952 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, 2953 &c_pte, &a_idx, &b_idx, &pte_idx) == false) 2954 return false; 2955 2956 if (!MMU_VALID_DT(*c_pte)) 2957 return false; 2958 2959 if (pap != NULL) 2960 *pap = MMU_PTE_PA(*c_pte); 2961 return true; 2962 } 2963 2964 /* pmap_remove_kernel INTERNAL 2965 ** 2966 * Remove the mapping of a range of virtual addresses from the kernel map. 2967 * The arguments are already page-aligned. 2968 */ 2969 static INLINE void 2970 pmap_remove_kernel(vaddr_t sva, vaddr_t eva) 2971 { 2972 int idx, eidx; 2973 2974 #ifdef PMAP_DEBUG 2975 if ((sva & PGOFSET) || (eva & PGOFSET)) 2976 panic("pmap_remove_kernel: alignment"); 2977 #endif 2978 2979 idx = m68k_btop(sva - KERNBASE3X); 2980 eidx = m68k_btop(eva - KERNBASE3X); 2981 2982 while (idx < eidx) { 2983 pmap_remove_pte(&kernCbase[idx++]); 2984 TBIS(sva); 2985 sva += PAGE_SIZE; 2986 } 2987 } 2988 2989 /* pmap_remove INTERFACE 2990 ** 2991 * Remove the mapping of a range of virtual addresses from the given pmap. 2992 * 2993 */ 2994 void 2995 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva) 2996 { 2997 2998 if (pmap == pmap_kernel()) { 2999 pmap_remove_kernel(sva, eva); 3000 return; 3001 } 3002 3003 /* 3004 * If the pmap doesn't have an A table of its own, it has no mappings 3005 * that can be removed. 3006 */ 3007 if (pmap->pm_a_tmgr == NULL) 3008 return; 3009 3010 /* 3011 * Remove the specified range from the pmap. If the function 3012 * returns true, the operation removed all the valid mappings 3013 * in the pmap and freed its A table. If this happened to the 3014 * currently loaded pmap, the MMU root pointer must be reloaded 3015 * with the default 'kernel' map. 3016 */ 3017 if (pmap_remove_a(pmap->pm_a_tmgr, sva, eva)) { 3018 if (kernel_crp.rp_addr == pmap->pm_a_phys) { 3019 kernel_crp.rp_addr = kernAphys; 3020 loadcrp(&kernel_crp); 3021 /* will do TLB flush below */ 3022 } 3023 pmap->pm_a_tmgr = NULL; 3024 pmap->pm_a_phys = kernAphys; 3025 } 3026 3027 /* 3028 * If we just modified the current address space, 3029 * make sure to flush the MMU cache. 3030 * 3031 * XXX - this could be an unecessarily large flush. 3032 * XXX - Could decide, based on the size of the VA range 3033 * to be removed, whether to flush "by pages" or "all". 
3034 */ 3035 if (pmap == current_pmap()) 3036 TBIAU(); 3037 } 3038 3039 /* pmap_remove_a INTERNAL 3040 ** 3041 * This is function number one in a set of three that removes a range 3042 * of memory in the most efficient manner by removing the highest possible 3043 * tables from the memory space. This particular function attempts to remove 3044 * as many B tables as it can, delegating the remaining fragmented ranges to 3045 * pmap_remove_b(). 3046 * 3047 * If the removal operation results in an empty A table, the function returns 3048 * true. 3049 * 3050 * It's ugly but will do for now. 3051 */ 3052 bool 3053 pmap_remove_a(a_tmgr_t *a_tbl, vaddr_t sva, vaddr_t eva) 3054 { 3055 bool empty; 3056 int idx; 3057 vaddr_t nstart, nend; 3058 b_tmgr_t *b_tbl; 3059 mmu_long_dte_t *a_dte; 3060 mmu_short_dte_t *b_dte; 3061 uint8_t at_wired, bt_wired; 3062 3063 /* 3064 * The following code works with what I call a 'granularity 3065 * reduction algorithim'. A range of addresses will always have 3066 * the following properties, which are classified according to 3067 * how the range relates to the size of the current granularity 3068 * - an A table entry: 3069 * 3070 * 1 2 3 4 3071 * -+---+---+---+---+---+---+---+- 3072 * -+---+---+---+---+---+---+---+- 3073 * 3074 * A range will always start on a granularity boundary, illustrated 3075 * by '+' signs in the table above, or it will start at some point 3076 * inbetween a granularity boundary, as illustrated by point 1. 3077 * The first step in removing a range of addresses is to remove the 3078 * range between 1 and 2, the nearest granularity boundary. This 3079 * job is handled by the section of code governed by the 3080 * 'if (start < nstart)' statement. 3081 * 3082 * A range will always encompass zero or more intergral granules, 3083 * illustrated by points 2 and 3. Integral granules are easy to 3084 * remove. The removal of these granules is the second step, and 3085 * is handled by the code block 'if (nstart < nend)'. 3086 * 3087 * Lastly, a range will always end on a granularity boundary, 3088 * ill. by point 3, or it will fall just beyond one, ill. by point 3089 * 4. The last step involves removing this range and is handled by 3090 * the code block 'if (nend < end)'. 3091 */ 3092 nstart = MMU_ROUND_UP_A(sva); 3093 nend = MMU_ROUND_A(eva); 3094 3095 at_wired = a_tbl->at_wcnt; 3096 3097 if (sva < nstart) { 3098 /* 3099 * This block is executed if the range starts between 3100 * a granularity boundary. 3101 * 3102 * First find the DTE which is responsible for mapping 3103 * the start of the range. 3104 */ 3105 idx = MMU_TIA(sva); 3106 a_dte = &a_tbl->at_dtbl[idx]; 3107 3108 /* 3109 * If the DTE is valid then delegate the removal of the sub 3110 * range to pmap_remove_b(), which can remove addresses at 3111 * a finer granularity. 3112 */ 3113 if (MMU_VALID_DT(*a_dte)) { 3114 b_dte = mmu_ptov(a_dte->addr.raw); 3115 b_tbl = mmuB2tmgr(b_dte); 3116 bt_wired = b_tbl->bt_wcnt; 3117 3118 /* 3119 * The sub range to be removed starts at the start 3120 * of the full range we were asked to remove, and ends 3121 * at the greater of: 3122 * 1. The end of the full range, -or- 3123 * 2. The end of the full range, rounded down to the 3124 * nearest granularity boundary. 3125 */ 3126 if (eva < nstart) 3127 empty = pmap_remove_b(b_tbl, sva, eva); 3128 else 3129 empty = pmap_remove_b(b_tbl, sva, nstart); 3130 3131 /* 3132 * If the child table no longer has wired entries, 3133 * decrement wired entry count. 
3134 */ 3135 if (bt_wired && b_tbl->bt_wcnt == 0) 3136 a_tbl->at_wcnt--; 3137 3138 /* 3139 * If the removal resulted in an empty B table, 3140 * invalidate the DTE that points to it and decrement 3141 * the valid entry count of the A table. 3142 */ 3143 if (empty) { 3144 a_dte->attr.raw = MMU_DT_INVALID; 3145 a_tbl->at_ecnt--; 3146 } 3147 } 3148 /* 3149 * If the DTE is invalid, the address range is already non- 3150 * existent and can simply be skipped. 3151 */ 3152 } 3153 if (nstart < nend) { 3154 /* 3155 * This block is executed if the range spans a whole number 3156 * multiple of granules (A table entries.) 3157 * 3158 * First find the DTE which is responsible for mapping 3159 * the start of the first granule involved. 3160 */ 3161 idx = MMU_TIA(nstart); 3162 a_dte = &a_tbl->at_dtbl[idx]; 3163 3164 /* 3165 * Remove entire sub-granules (B tables) one at a time, 3166 * until reaching the end of the range. 3167 */ 3168 for (; nstart < nend; a_dte++, nstart += MMU_TIA_RANGE) 3169 if (MMU_VALID_DT(*a_dte)) { 3170 /* 3171 * Find the B table manager for the 3172 * entry and free it. 3173 */ 3174 b_dte = mmu_ptov(a_dte->addr.raw); 3175 b_tbl = mmuB2tmgr(b_dte); 3176 bt_wired = b_tbl->bt_wcnt; 3177 3178 free_b_table(b_tbl, true); 3179 3180 /* 3181 * All child entries has been removed. 3182 * If there were any wired entries in it, 3183 * decrement wired entry count. 3184 */ 3185 if (bt_wired) 3186 a_tbl->at_wcnt--; 3187 3188 /* 3189 * Invalidate the DTE that points to the 3190 * B table and decrement the valid entry 3191 * count of the A table. 3192 */ 3193 a_dte->attr.raw = MMU_DT_INVALID; 3194 a_tbl->at_ecnt--; 3195 } 3196 } 3197 if (nend < eva) { 3198 /* 3199 * This block is executed if the range ends beyond a 3200 * granularity boundary. 3201 * 3202 * First find the DTE which is responsible for mapping 3203 * the start of the nearest (rounded down) granularity 3204 * boundary. 3205 */ 3206 idx = MMU_TIA(nend); 3207 a_dte = &a_tbl->at_dtbl[idx]; 3208 3209 /* 3210 * If the DTE is valid then delegate the removal of the sub 3211 * range to pmap_remove_b(), which can remove addresses at 3212 * a finer granularity. 3213 */ 3214 if (MMU_VALID_DT(*a_dte)) { 3215 /* 3216 * Find the B table manager for the entry 3217 * and hand it to pmap_remove_b() along with 3218 * the sub range. 3219 */ 3220 b_dte = mmu_ptov(a_dte->addr.raw); 3221 b_tbl = mmuB2tmgr(b_dte); 3222 bt_wired = b_tbl->bt_wcnt; 3223 3224 empty = pmap_remove_b(b_tbl, nend, eva); 3225 3226 /* 3227 * If the child table no longer has wired entries, 3228 * decrement wired entry count. 3229 */ 3230 if (bt_wired && b_tbl->bt_wcnt == 0) 3231 a_tbl->at_wcnt--; 3232 /* 3233 * If the removal resulted in an empty B table, 3234 * invalidate the DTE that points to it and decrement 3235 * the valid entry count of the A table. 3236 */ 3237 if (empty) { 3238 a_dte->attr.raw = MMU_DT_INVALID; 3239 a_tbl->at_ecnt--; 3240 } 3241 } 3242 } 3243 3244 /* 3245 * If there are no more entries in the A table, release it 3246 * back to the available pool and return true. 3247 */ 3248 if (a_tbl->at_ecnt == 0) { 3249 KASSERT(a_tbl->at_wcnt == 0); 3250 a_tbl->at_parent = NULL; 3251 if (!at_wired) 3252 TAILQ_REMOVE(&a_pool, a_tbl, at_link); 3253 TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link); 3254 empty = true; 3255 } else { 3256 /* 3257 * If the table doesn't have wired entries any longer 3258 * but still has unwired entries, put it back into 3259 * the available queue. 
3260 */ 3261 if (at_wired && a_tbl->at_wcnt == 0) 3262 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link); 3263 empty = false; 3264 } 3265 3266 return empty; 3267 } 3268 3269 /* pmap_remove_b INTERNAL 3270 ** 3271 * Remove a range of addresses from an address space, trying to remove entire 3272 * C tables if possible. 3273 * 3274 * If the operation results in an empty B table, the function returns true. 3275 */ 3276 bool 3277 pmap_remove_b(b_tmgr_t *b_tbl, vaddr_t sva, vaddr_t eva) 3278 { 3279 bool empty; 3280 int idx; 3281 vaddr_t nstart, nend, rstart; 3282 c_tmgr_t *c_tbl; 3283 mmu_short_dte_t *b_dte; 3284 mmu_short_pte_t *c_dte; 3285 uint8_t bt_wired, ct_wired; 3286 3287 nstart = MMU_ROUND_UP_B(sva); 3288 nend = MMU_ROUND_B(eva); 3289 3290 bt_wired = b_tbl->bt_wcnt; 3291 3292 if (sva < nstart) { 3293 idx = MMU_TIB(sva); 3294 b_dte = &b_tbl->bt_dtbl[idx]; 3295 if (MMU_VALID_DT(*b_dte)) { 3296 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte)); 3297 c_tbl = mmuC2tmgr(c_dte); 3298 ct_wired = c_tbl->ct_wcnt; 3299 3300 if (eva < nstart) 3301 empty = pmap_remove_c(c_tbl, sva, eva); 3302 else 3303 empty = pmap_remove_c(c_tbl, sva, nstart); 3304 3305 /* 3306 * If the child table no longer has wired entries, 3307 * decrement wired entry count. 3308 */ 3309 if (ct_wired && c_tbl->ct_wcnt == 0) 3310 b_tbl->bt_wcnt--; 3311 3312 if (empty) { 3313 b_dte->attr.raw = MMU_DT_INVALID; 3314 b_tbl->bt_ecnt--; 3315 } 3316 } 3317 } 3318 if (nstart < nend) { 3319 idx = MMU_TIB(nstart); 3320 b_dte = &b_tbl->bt_dtbl[idx]; 3321 rstart = nstart; 3322 while (rstart < nend) { 3323 if (MMU_VALID_DT(*b_dte)) { 3324 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte)); 3325 c_tbl = mmuC2tmgr(c_dte); 3326 ct_wired = c_tbl->ct_wcnt; 3327 3328 free_c_table(c_tbl, true); 3329 3330 /* 3331 * All child entries has been removed. 3332 * If there were any wired entries in it, 3333 * decrement wired entry count. 3334 */ 3335 if (ct_wired) 3336 b_tbl->bt_wcnt--; 3337 3338 b_dte->attr.raw = MMU_DT_INVALID; 3339 b_tbl->bt_ecnt--; 3340 } 3341 b_dte++; 3342 rstart += MMU_TIB_RANGE; 3343 } 3344 } 3345 if (nend < eva) { 3346 idx = MMU_TIB(nend); 3347 b_dte = &b_tbl->bt_dtbl[idx]; 3348 if (MMU_VALID_DT(*b_dte)) { 3349 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte)); 3350 c_tbl = mmuC2tmgr(c_dte); 3351 ct_wired = c_tbl->ct_wcnt; 3352 empty = pmap_remove_c(c_tbl, nend, eva); 3353 3354 /* 3355 * If the child table no longer has wired entries, 3356 * decrement wired entry count. 3357 */ 3358 if (ct_wired && c_tbl->ct_wcnt == 0) 3359 b_tbl->bt_wcnt--; 3360 3361 if (empty) { 3362 b_dte->attr.raw = MMU_DT_INVALID; 3363 b_tbl->bt_ecnt--; 3364 } 3365 } 3366 } 3367 3368 if (b_tbl->bt_ecnt == 0) { 3369 KASSERT(b_tbl->bt_wcnt == 0); 3370 b_tbl->bt_parent = NULL; 3371 if (!bt_wired) 3372 TAILQ_REMOVE(&b_pool, b_tbl, bt_link); 3373 TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link); 3374 empty = true; 3375 } else { 3376 /* 3377 * If the table doesn't have wired entries any longer 3378 * but still has unwired entries, put it back into 3379 * the available queue. 3380 */ 3381 if (bt_wired && b_tbl->bt_wcnt == 0) 3382 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link); 3383 3384 empty = false; 3385 } 3386 3387 return empty; 3388 } 3389 3390 /* pmap_remove_c INTERNAL 3391 ** 3392 * Remove a range of addresses from the given C table. 
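 *
 * (The callers in pmap_remove_b() only ever pass ranges that fall
 * within a single C table's span, so a simple linear walk of the
 * PTEs starting at MMU_TIC(sva) is sufficient here.  As with the A
 * and B variants, the function returns true if the table ends up
 * empty, so the caller can invalidate the parent descriptor.)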
3393 */ 3394 bool 3395 pmap_remove_c(c_tmgr_t *c_tbl, vaddr_t sva, vaddr_t eva) 3396 { 3397 bool empty; 3398 int idx; 3399 mmu_short_pte_t *c_pte; 3400 uint8_t ct_wired; 3401 3402 ct_wired = c_tbl->ct_wcnt; 3403 3404 idx = MMU_TIC(sva); 3405 c_pte = &c_tbl->ct_dtbl[idx]; 3406 for (; sva < eva; sva += MMU_PAGE_SIZE, c_pte++) { 3407 if (MMU_VALID_DT(*c_pte)) { 3408 if (c_pte->attr.raw & MMU_SHORT_PTE_WIRED) 3409 c_tbl->ct_wcnt--; 3410 pmap_remove_pte(c_pte); 3411 c_tbl->ct_ecnt--; 3412 } 3413 } 3414 3415 if (c_tbl->ct_ecnt == 0) { 3416 KASSERT(c_tbl->ct_wcnt == 0); 3417 c_tbl->ct_parent = NULL; 3418 if (!ct_wired) 3419 TAILQ_REMOVE(&c_pool, c_tbl, ct_link); 3420 TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link); 3421 empty = true; 3422 } else { 3423 /* 3424 * If the table doesn't have wired entries any longer 3425 * but still has unwired entries, put it back into 3426 * the available queue. 3427 */ 3428 if (ct_wired && c_tbl->ct_wcnt == 0) 3429 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link); 3430 empty = false; 3431 } 3432 3433 return empty; 3434 } 3435 3436 /* pmap_bootstrap_alloc INTERNAL 3437 ** 3438 * Used internally for memory allocation at startup when malloc is not 3439 * available. This code will fail once it crosses the first memory 3440 * bank boundary on the 3/80. Hopefully by then however, the VM system 3441 * will be in charge of allocation. 3442 */ 3443 void * 3444 pmap_bootstrap_alloc(int size) 3445 { 3446 void *rtn; 3447 3448 #ifdef PMAP_DEBUG 3449 if (bootstrap_alloc_enabled == false) { 3450 mon_printf("pmap_bootstrap_alloc: disabled\n"); 3451 sunmon_abort(); 3452 } 3453 #endif 3454 3455 rtn = (void *) virtual_avail; 3456 virtual_avail += size; 3457 3458 #ifdef PMAP_DEBUG 3459 if (virtual_avail > virtual_contig_end) { 3460 mon_printf("pmap_bootstrap_alloc: out of mem\n"); 3461 sunmon_abort(); 3462 } 3463 #endif 3464 3465 return rtn; 3466 } 3467 3468 /* pmap_bootstap_aalign INTERNAL 3469 ** 3470 * Used to insure that the next call to pmap_bootstrap_alloc() will 3471 * return a chunk of memory aligned to the specified size. 3472 * 3473 * Note: This function will only support alignment sizes that are powers 3474 * of two. 3475 */ 3476 void 3477 pmap_bootstrap_aalign(int size) 3478 { 3479 int off; 3480 3481 off = virtual_avail & (size - 1); 3482 if (off) { 3483 (void)pmap_bootstrap_alloc(size - off); 3484 } 3485 } 3486 3487 /* pmap_pa_exists 3488 ** 3489 * Used by the /dev/mem driver to see if a given PA is memory 3490 * that can be mapped. (The PA is not in a hole.) 3491 */ 3492 int 3493 pmap_pa_exists(paddr_t pa) 3494 { 3495 int i; 3496 3497 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) { 3498 if ((pa >= avail_mem[i].pmem_start) && 3499 (pa < avail_mem[i].pmem_end)) 3500 return 1; 3501 if (avail_mem[i].pmem_next == NULL) 3502 break; 3503 } 3504 return 0; 3505 } 3506 3507 /* Called only from locore.s and pmap.c */ 3508 void _pmap_switch(pmap_t pmap); 3509 3510 /* 3511 * _pmap_switch INTERNAL 3512 * 3513 * This is called by locore.s:cpu_switch() when it is 3514 * switching to a new process. Load new translations. 3515 * Note: done in-line by locore.s unless PMAP_DEBUG 3516 * 3517 * Note that we do NOT allocate a context here, but 3518 * share the "kernel only" context until we really 3519 * need our own context for user-space mappings in 3520 * pmap_enter_user(). [ s/context/mmu A table/ ] 3521 */ 3522 void 3523 _pmap_switch(pmap_t pmap) 3524 { 3525 u_long rootpa; 3526 3527 /* 3528 * Only do reload/flush if we have to. 
3529 * Note that if the old and new process 3530 * were BOTH using the "null" context, 3531 * then this will NOT flush the TLB. 3532 */ 3533 rootpa = pmap->pm_a_phys; 3534 if (kernel_crp.rp_addr != rootpa) { 3535 DPRINT(("pmap_activate(%p)\n", pmap)); 3536 kernel_crp.rp_addr = rootpa; 3537 loadcrp(&kernel_crp); 3538 TBIAU(); 3539 } 3540 } 3541 3542 /* 3543 * Exported version of pmap_activate(). This is called from the 3544 * machine-independent VM code when a process is given a new pmap. 3545 * If (p == curlwp) do like cpu_switch would do; otherwise just 3546 * take this as notification that the process has a new pmap. 3547 */ 3548 void 3549 pmap_activate(struct lwp *l) 3550 { 3551 3552 if (l->l_proc == curproc) { 3553 _pmap_switch(l->l_proc->p_vmspace->vm_map.pmap); 3554 } 3555 } 3556 3557 /* 3558 * pmap_deactivate INTERFACE 3559 ** 3560 * This is called to deactivate the specified process's address space. 3561 */ 3562 void 3563 pmap_deactivate(struct lwp *l) 3564 { 3565 3566 /* Nothing to do. */ 3567 } 3568 3569 /* 3570 * Fill in the sun3x-specific part of the kernel core header 3571 * for dumpsys(). (See machdep.c for the rest.) 3572 */ 3573 void 3574 pmap_kcore_hdr(struct sun3x_kcore_hdr *sh) 3575 { 3576 u_long spa, len; 3577 int i; 3578 3579 sh->pg_frame = MMU_SHORT_PTE_BASEADDR; 3580 sh->pg_valid = MMU_DT_PAGE; 3581 sh->contig_end = virtual_contig_end; 3582 sh->kernCbase = (u_long)kernCbase; 3583 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) { 3584 spa = avail_mem[i].pmem_start; 3585 spa = m68k_trunc_page(spa); 3586 len = avail_mem[i].pmem_end - spa; 3587 len = m68k_round_page(len); 3588 sh->ram_segs[i].start = spa; 3589 sh->ram_segs[i].size = len; 3590 } 3591 } 3592 3593 3594 /* pmap_virtual_space INTERFACE 3595 ** 3596 * Return the current available range of virtual addresses in the 3597 * arguuments provided. Only really called once. 3598 */ 3599 void 3600 pmap_virtual_space(vaddr_t *vstart, vaddr_t *vend) 3601 { 3602 3603 *vstart = virtual_avail; 3604 *vend = virtual_end; 3605 } 3606 3607 /* 3608 * Provide memory to the VM system. 3609 * 3610 * Assume avail_start is always in the 3611 * first segment as pmap_bootstrap does. 3612 */ 3613 static void 3614 pmap_page_upload(void) 3615 { 3616 paddr_t a, b; /* memory range */ 3617 int i; 3618 3619 /* Supply the memory in segments. */ 3620 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) { 3621 a = atop(avail_mem[i].pmem_start); 3622 b = atop(avail_mem[i].pmem_end); 3623 if (i == 0) 3624 a = atop(avail_start); 3625 if (avail_mem[i].pmem_end > avail_end) 3626 b = atop(avail_end); 3627 3628 uvm_page_physload(a, b, a, b, VM_FREELIST_DEFAULT); 3629 3630 if (avail_mem[i].pmem_next == NULL) 3631 break; 3632 } 3633 } 3634 3635 /* pmap_count INTERFACE 3636 ** 3637 * Return the number of resident (valid) pages in the given pmap. 3638 * 3639 * Note: If this function is handed the kernel map, it will report 3640 * that it has no mappings. Hopefully the VM system won't ask for kernel 3641 * map statistics. 3642 */ 3643 segsz_t 3644 pmap_count(pmap_t pmap, int type) 3645 { 3646 u_int count; 3647 int a_idx, b_idx; 3648 a_tmgr_t *a_tbl; 3649 b_tmgr_t *b_tbl; 3650 c_tmgr_t *c_tbl; 3651 3652 /* 3653 * If the pmap does not have its own A table manager, it has no 3654 * valid entires. 
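 *
 * Usage sketch (illustrative):
 *
 *	resident = pmap_count(pmap, 0);		(valid page count)
 *	wired    = pmap_count(pmap, 1);		(wired page count;
 *						 any non-zero 'type')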
3655 */ 3656 if (pmap->pm_a_tmgr == NULL) 3657 return 0; 3658 3659 a_tbl = pmap->pm_a_tmgr; 3660 3661 count = 0; 3662 for (a_idx = 0; a_idx < MMU_TIA(KERNBASE3X); a_idx++) { 3663 if (MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) { 3664 b_tbl = mmuB2tmgr(mmu_ptov(a_tbl->at_dtbl[a_idx].addr.raw)); 3665 for (b_idx = 0; b_idx < MMU_B_TBL_SIZE; b_idx++) { 3666 if (MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) { 3667 c_tbl = mmuC2tmgr( 3668 mmu_ptov(MMU_DTE_PA(b_tbl->bt_dtbl[b_idx]))); 3669 if (type == 0) 3670 /* 3671 * A resident entry count has been requested. 3672 */ 3673 count += c_tbl->ct_ecnt; 3674 else 3675 /* 3676 * A wired entry count has been requested. 3677 */ 3678 count += c_tbl->ct_wcnt; 3679 } 3680 } 3681 } 3682 } 3683 3684 return count; 3685 } 3686 3687 /************************ SUN3 COMPATIBILITY ROUTINES ******************** 3688 * The following routines are only used by DDB for tricky kernel text * 3689 * text operations in db_memrw.c. They are provided for sun3 * 3690 * compatibility. * 3691 *************************************************************************/ 3692 /* get_pte INTERNAL 3693 ** 3694 * Return the page descriptor the describes the kernel mapping 3695 * of the given virtual address. 3696 */ 3697 extern u_long ptest_addr(u_long); /* XXX: locore.s */ 3698 u_int 3699 get_pte(vaddr_t va) 3700 { 3701 u_long pte_pa; 3702 mmu_short_pte_t *pte; 3703 3704 /* Get the physical address of the PTE */ 3705 pte_pa = ptest_addr(va & ~PGOFSET); 3706 3707 /* Convert to a virtual address... */ 3708 pte = (mmu_short_pte_t *) (KERNBASE3X + pte_pa); 3709 3710 /* Make sure it is in our level-C tables... */ 3711 if ((pte < kernCbase) || 3712 (pte >= &mmuCbase[NUM_USER_PTES])) 3713 return 0; 3714 3715 /* ... and just return its contents. */ 3716 return (pte->attr.raw); 3717 } 3718 3719 3720 /* set_pte INTERNAL 3721 ** 3722 * Set the page descriptor that describes the kernel mapping 3723 * of the given virtual address. 3724 */ 3725 void 3726 set_pte(vaddr_t va, u_int pte) 3727 { 3728 u_long idx; 3729 3730 if (va < KERNBASE3X) 3731 return; 3732 3733 idx = (unsigned long) m68k_btop(va - KERNBASE3X); 3734 kernCbase[idx].attr.raw = pte; 3735 TBIS(va); 3736 } 3737 3738 /* 3739 * Routine: pmap_procwr 3740 * 3741 * Function: 3742 * Synchronize caches corresponding to [addr, addr+len) in p. 3743 */ 3744 void 3745 pmap_procwr(struct proc *p, vaddr_t va, size_t len) 3746 { 3747 3748 (void)cachectl1(0x80000004, va, len, p); 3749 } 3750 3751 3752 #ifdef PMAP_DEBUG 3753 /************************** DEBUGGING ROUTINES ************************** 3754 * The following routines are meant to be an aid to debugging the pmap * 3755 * system. They are callable from the DDB command line and should be * 3756 * prepared to be handed unstable or incomplete states of the system. * 3757 ************************************************************************/ 3758 3759 /* pv_list 3760 ** 3761 * List all pages found on the pv list for the given physical page. 3762 * To avoid endless loops, the listing will stop at the end of the list 3763 * or after 'n' entries - whichever comes first. 
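 *
 * Illustrative invocation from the DDB command line (exact syntax
 * depends on the ddb configuration):
 *
 *	call pv_list(0x3f00000, 8)
 *
 * which prints up to 8 of the mappings of physical page 0x3f00000.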
3764 */ 3765 void 3766 pv_list(paddr_t pa, int n) 3767 { 3768 int idx; 3769 vaddr_t va; 3770 pv_t *pv; 3771 c_tmgr_t *c_tbl; 3772 pmap_t pmap; 3773 3774 pv = pa2pv(pa); 3775 idx = pv->pv_idx; 3776 for (; idx != PVE_EOL && n > 0; idx = pvebase[idx].pve_next, n--) { 3777 va = pmap_get_pteinfo(idx, &pmap, &c_tbl); 3778 printf("idx %d, pmap 0x%x, va 0x%x, c_tbl %x\n", 3779 idx, (u_int) pmap, (u_int) va, (u_int) c_tbl); 3780 } 3781 } 3782 #endif /* PMAP_DEBUG */ 3783 3784 #ifdef NOT_YET 3785 /* and maybe not ever */ 3786 /************************** LOW-LEVEL ROUTINES ************************** 3787 * These routines will eventually be re-written into assembly and placed* 3788 * in locore.s. They are here now as stubs so that the pmap module can * 3789 * be linked as a standalone user program for testing. * 3790 ************************************************************************/ 3791 /* flush_atc_crp INTERNAL 3792 ** 3793 * Flush all page descriptors derived from the given CPU Root Pointer 3794 * (CRP), or 'A' table as it is known here, from the 68851's automatic 3795 * cache. 3796 */ 3797 void 3798 flush_atc_crp(int a_tbl) 3799 { 3800 mmu_long_rp_t rp; 3801 3802 /* Create a temporary root table pointer that points to the 3803 * given A table. 3804 */ 3805 rp.attr.raw = ~MMU_LONG_RP_LU; 3806 rp.addr.raw = (unsigned int) a_tbl; 3807 3808 mmu_pflushr(&rp); 3809 /* mmu_pflushr: 3810 * movel sp(4)@,a0 3811 * pflushr a0@ 3812 * rts 3813 */ 3814 } 3815 #endif /* NOT_YET */ 3816