1 /* $NetBSD: pmap.c,v 1.121 2022/05/31 08:43:15 andvar Exp $ */
2
3 /*-
4 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jeremy Cooper.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * XXX These comments aren't quite accurate. Need to change.
34 * The sun3x uses the MC68851 Memory Management Unit, which is built
35 * into the CPU. The 68851 maps virtual to physical addresses using
36 * a multi-level table lookup, which is stored in the very memory that
37 * it maps. The number of levels of lookup is configurable from one
38 * to four. In this implementation, we use three, named 'A' through 'C'.
39 *
40 * The MMU translates virtual addresses into physical addresses by
41 * traversing these tables in a process called a 'table walk'. The most
42 * significant 7 bits of the Virtual Address ('VA') being translated are
43 * used as an index into the level A table, whose base in physical memory
44 * is stored in a special MMU register, the 'CPU Root Pointer' or CRP. The
45 * address found at that index in the A table is used as the base
46 * address for the next table, the B table. The next six bits of the VA are
47 * used as an index into the B table, which in turn gives the base address
48 * of the third and final C table.
49 *
50 * The next six bits of the VA are used as an index into the C table to
51  * locate a Page Table Entry (PTE).  The PTE contains the physical base
52  * address of a page, to which the remaining 13 bits of the VA are added,
53  * producing the mapped physical address.
54 *
55 * To map the entire memory space in this manner would require 2114296 bytes
56 * of page tables per process - quite expensive. Instead we will
57 * allocate a fixed but considerably smaller space for the page tables at
58 * the time the VM system is initialized. When the pmap code is asked by
59 * the kernel to map a VA to a PA, it allocates tables as needed from this
60 * pool. When there are no more tables in the pool, tables are stolen
61 * from the oldest mapped entries in the tree. This is only possible
62 * because all memory mappings are stored in the kernel memory map
63 * structures, independent of the pmap structures. A VA which references
64 * one of these invalidated maps will cause a page fault. The kernel
65 * will determine that the page fault was caused by a task using a valid
66 * VA, but for some reason (which does not concern it), that address was
67 * not mapped. It will ask the pmap code to re-map the entry and then
68 * it will resume executing the faulting task.
69 *
70 * In this manner the most efficient use of the page table space is
71 * achieved. Tasks which do not execute often will have their tables
72 * stolen and reused by tasks which execute more frequently. The best
73 * size for the page table pool will probably be determined by
74 * experimentation.
75 *
76 * You read all of the comments so far. Good for you.
77 * Now go play!
78 */
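
/*
 * An illustrative sketch of the 7/6/6/13 address split described above.
 * The pmap code itself uses the MMU_TIA()/MMU_TIB()/MMU_TIC() macros for
 * this (see their uses below); the shift and mask values here are not
 * taken from the headers, but simply derived from the bit widths given
 * in the comment, and are shown only as a reading aid:
 *
 *	a_idx  = (va >> 25) & 0x7f;	7-bit index into the A table
 *	b_idx  = (va >> 19) & 0x3f;	6-bit index into the B table
 *	c_idx  = (va >> 13) & 0x3f;	6-bit index into the C table
 *	offset =  va & 0x1fff;		13-bit offset within the 8KB page
 */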
79
80 /*** A Note About the 68851 Address Translation Cache
81 * The MC68851 has a 64 entry cache, called the Address Translation Cache
82 * or 'ATC'. This cache stores the most recently used page descriptors
83 * accessed by the MMU when it does translations. Using a marker called a
84 * 'task alias' the MMU can store the descriptors from 8 different table
85 * spaces concurrently. The task alias is associated with the base
86 * address of the level A table of that address space. When an address
87 * space is currently active (the CRP currently points to its A table)
88 * the only cached descriptors that will be obeyed are ones which have a
89 * matching task alias of the current space associated with them.
90 *
91 * Since the cache is always consulted before any table lookups are done,
92 * it is important that it accurately reflect the state of the MMU tables.
93 * Whenever a change has been made to a table that has been loaded into
94 * the MMU, the code must be sure to flush any cached entries that are
95 * affected by the change. These instances are documented in the code at
96 * various points.
97 */
98 /*** A Note About the Note About the 68851 Address Translation Cache
99 * 4 months into this code I discovered that the sun3x does not have
100  * an MC68851 chip. Instead, it has a version of this MMU that is part of
101  * the 68030 CPU.
102  * Although it behaves very similarly to the 68851, it only has 1 task
103 * alias and a 22 entry cache. So sadly (or happily), the first paragraph
104 * of the previous note does not apply to the sun3x pmap.
105 */
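
/*
 * A minimal sketch of the flushing rule described in the notes above:
 * once a descriptor in a table that the MMU is currently using has been
 * changed, the stale ATC entry must be invalidated before the old
 * translation can do any harm.  The TBIS()/TBIA() macros are assumed
 * here to be those provided by <m68k/cacheops.h> (included below):
 *
 *	pte->attr.raw = MMU_DT_INVALID;	rewrite the descriptor...
 *	TBIS(va);			...then flush that VA's ATC entry,
 *	TBIA();				or flush the entire ATC when many
 *					entries have changed.
 */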
106
107 #include <sys/cdefs.h>
108 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.121 2022/05/31 08:43:15 andvar Exp $");
109
110 #include "opt_ddb.h"
111 #include "opt_pmap_debug.h"
112
113 #include <sys/param.h>
114 #include <sys/systm.h>
115 #include <sys/proc.h>
116 #include <sys/malloc.h>
117 #include <sys/pool.h>
118 #include <sys/queue.h>
119 #include <sys/kcore.h>
120 #include <sys/atomic.h>
121
122 #include <uvm/uvm.h>
123
124 #include <machine/cpu.h>
125 #include <machine/kcore.h>
126 #include <machine/mon.h>
127 #include <machine/pmap.h>
128 #include <machine/pte.h>
129 #include <machine/vmparam.h>
130 #include <m68k/cacheops.h>
131
132 #include <sun3/sun3/cache.h>
133 #include <sun3/sun3/machdep.h>
134
135 #include "pmap_pvt.h"
136
137 /* XXX - What headers declare these? */
138 extern struct pcb *curpcb;
139
140 /* Defined in locore.s */
141 extern char kernel_text[];
142
143 /* Defined by the linker */
144 extern char etext[], edata[], end[];
145 extern char *esym; /* DDB */
146
147 /*************************** DEBUGGING DEFINITIONS ***********************
148 * Macros, preprocessor defines and variables used in debugging can make *
149 * code hard to read. Anything used exclusively for debugging purposes *
150 * is defined here to avoid having such mess scattered around the file. *
151 *************************************************************************/
152 #ifdef PMAP_DEBUG
153 /*
154 * To aid the debugging process, macros should be expanded into smaller steps
155 * that accomplish the same goal, yet provide convenient places for placing
156 * breakpoints. When this code is compiled with PMAP_DEBUG mode defined, the
157 * 'INLINE' keyword is defined to an empty string. This way, any function
158 * defined to be a 'static INLINE' will become 'outlined' and compiled as
159 * a separate function, which is much easier to debug.
160 */
161 #define INLINE /* nothing */
162
163 /*
164 * It is sometimes convenient to watch the activity of a particular table
165 * in the system. The following variables are used for that purpose.
166 */
167 a_tmgr_t *pmap_watch_atbl = 0;
168 b_tmgr_t *pmap_watch_btbl = 0;
169 c_tmgr_t *pmap_watch_ctbl = 0;
170
171 int pmap_debug = 0;
172 #define DPRINT(args) if (pmap_debug) printf args
173
174 #else /********** Stuff below is defined if NOT debugging **************/
175
176 #define INLINE inline
177 #define DPRINT(args) /* nada */
178
179 #endif /* PMAP_DEBUG */
180 /*********************** END OF DEBUGGING DEFINITIONS ********************/
181
182 /*** Management Structure - Memory Layout
183 * For every MMU table in the sun3x pmap system there must be a way to
184 * manage it; we must know which process is using it, what other tables
185 * depend on it, and whether or not it contains any locked pages. This
186 * is solved by the creation of 'table management' or 'tmgr'
187 * structures. One for each MMU table in the system.
188 *
189 * MAP OF MEMORY USED BY THE PMAP SYSTEM
190 *
191 * towards lower memory
192 * kernAbase -> +-------------------------------------------------------+
193 * | Kernel MMU A level table |
194 * kernBbase -> +-------------------------------------------------------+
195 * | Kernel MMU B level tables |
196 * kernCbase -> +-------------------------------------------------------+
197 * | |
198 * | Kernel MMU C level tables |
199 * | |
200 * mmuCbase -> +-------------------------------------------------------+
201 * | User MMU C level tables |
202 * mmuAbase -> +-------------------------------------------------------+
203 * | |
204 * | User MMU A level tables |
205 * | |
206 * mmuBbase -> +-------------------------------------------------------+
207 * | User MMU B level tables |
208 * tmgrAbase -> +-------------------------------------------------------+
209 * | TMGR A level table structures |
210 * tmgrBbase -> +-------------------------------------------------------+
211 * | TMGR B level table structures |
212 * tmgrCbase -> +-------------------------------------------------------+
213 * | TMGR C level table structures |
214 * pvbase -> +-------------------------------------------------------+
215 * | Physical to Virtual mapping table (list heads) |
216 * pvebase -> +-------------------------------------------------------+
217 * | Physical to Virtual mapping table (list elements) |
218 * | |
219 * +-------------------------------------------------------+
220 * towards higher memory
221 *
222 * For every A table in the MMU A area, there will be a corresponding
223 * a_tmgr structure in the TMGR A area. The same will be true for
224 * the B and C tables. This arrangement will make it easy to find the
225 * controlling tmgr structure for any table in the system by use of
226 * (relatively) simple macros.
227 */
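
/*
 * For example, finding the manager of a given B table is nothing more
 * than an index computation off the bases shown in the map above; this
 * is exactly what the mmuB2tmgr() and mmuC2tmgr() functions further
 * down in this file do:
 *
 *	idx  = (mmuBtbl - mmuBbase) / MMU_B_TBL_SIZE;
 *	tmgr = &Btmgrbase[idx];
 */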
228
229 /*
230 * Global variables for storing the base addresses for the areas
231 * labeled above.
232 */
233 static vaddr_t kernAphys;
234 static mmu_long_dte_t *kernAbase;
235 static mmu_short_dte_t *kernBbase;
236 static mmu_short_pte_t *kernCbase;
237 static mmu_short_pte_t *mmuCbase;
238 static mmu_short_dte_t *mmuBbase;
239 static mmu_long_dte_t *mmuAbase;
240 static a_tmgr_t *Atmgrbase;
241 static b_tmgr_t *Btmgrbase;
242 static c_tmgr_t *Ctmgrbase;
243 static pv_t *pvbase;
244 static pv_elem_t *pvebase;
245 static struct pmap kernel_pmap;
246 struct pmap *const kernel_pmap_ptr = &kernel_pmap;
247
248 /*
249 * This holds the CRP currently loaded into the MMU.
250 */
251 struct mmu_rootptr kernel_crp;
252
253 /*
254 * Just all around global variables.
255 */
256 static TAILQ_HEAD(a_pool_head_struct, a_tmgr_struct) a_pool;
257 static TAILQ_HEAD(b_pool_head_struct, b_tmgr_struct) b_pool;
258 static TAILQ_HEAD(c_pool_head_struct, c_tmgr_struct) c_pool;
259
260
261 /*
262 * Flags used to mark the safety/availability of certain operations or
263 * resources.
264 */
265 /* Safe to use pmap_bootstrap_alloc(). */
266 static bool bootstrap_alloc_enabled = false;
267 /* Temporary virtual pages are in use */
268 int tmp_vpages_inuse;
269
270 /*
271 * XXX: For now, retain the traditional variables that were
272 * used in the old pmap/vm interface (without NONCONTIG).
273 */
274 /* Kernel virtual address space available: */
275 vaddr_t virtual_avail, virtual_end;
276 /* Physical address space available: */
277 paddr_t avail_start, avail_end;
278
279 /* This keeps track of the end of the contiguously mapped range. */
280 vaddr_t virtual_contig_end;
281
282 /* Physical address used by pmap_next_page() */
283 paddr_t avail_next;
284
285 /* These are used by pmap_copy_page(), etc. */
286 vaddr_t tmp_vpages[2];
287
288 /* memory pool for pmap structures */
289 struct pool pmap_pmap_pool;
290
291 /*
292 * The 3/80 is the only member of the sun3x family that has non-contiguous
293 * physical memory. Memory is divided into 4 banks which are physically
294 * locatable on the system board. Although the size of these banks varies
295 * with the size of memory they contain, their base addresses are
296 * permanently fixed. The following structure, which describes these
297 * banks, is initialized by pmap_bootstrap() after it reads from a similar
298 * structure provided by the ROM Monitor.
299 *
300 * For the other machines in the sun3x architecture which do have contiguous
301 * RAM, this list will have only one entry, which will describe the entire
302 * range of available memory.
303 */
304 struct pmap_physmem_struct avail_mem[SUN3X_NPHYS_RAM_SEGS];
305 u_int total_phys_mem;
306
307 /*************************************************************************/
308
309 /*
310 * XXX - Should "tune" these based on statistics.
311 *
312 * My first guess about the relative numbers of these needed is
313 * based on the fact that a "typical" process will have several
314 * pages mapped at low virtual addresses (text, data, bss), then
315 * some mapped shared libraries, and then some stack pages mapped
316 * near the high end of the VA space. Each process can use only
317 * one A table, and most will use only two B tables (maybe three)
318 * and probably about four C tables. Therefore, the first guess
319 * at the relative numbers of these needed is 1:2:4 -gwr
320 *
321 * The number of C tables needed is closely related to the amount
322 * of physical memory available plus a certain amount attributable
323 * to the use of double mappings. With a few simulation statistics
324 * we can find a reasonably good estimation of this unknown value.
325 * Armed with that and the above ratios, we have a good idea of what
326 * is needed at each level. -j
327 *
328  * Note: It is not the physical memory size, but the total mapped
329 * virtual space required by the combined working sets of all the
330 * currently _runnable_ processes. (Sleeping ones don't count.)
331 * The amount of physical memory should be irrelevant. -gwr
332 */
333 #ifdef FIXED_NTABLES
334 #define NUM_A_TABLES 16
335 #define NUM_B_TABLES 32
336 #define NUM_C_TABLES 64
337 #else
338 unsigned int NUM_A_TABLES, NUM_B_TABLES, NUM_C_TABLES;
339 #endif /* FIXED_NTABLES */
340
341 /*
342 * This determines our total virtual mapping capacity.
343 * Yes, it is a FIXED value so we can pre-allocate.
344 */
345 #define NUM_USER_PTES (NUM_C_TABLES * MMU_C_TBL_SIZE)
346
347 /*
348 * The size of the Kernel Virtual Address Space (KVAS)
349 * for purposes of MMU table allocation is -KERNBASE
350 * (length from KERNBASE to 0xFFFFffff)
351 */
352 #define KVAS_SIZE (-KERNBASE3X)
353
354 /* Numbers of kernel MMU tables to support KVAS_SIZE. */
355 #define KERN_B_TABLES (KVAS_SIZE >> MMU_TIA_SHIFT)
356 #define KERN_C_TABLES (KVAS_SIZE >> MMU_TIB_SHIFT)
357 #define NUM_KERN_PTES (KVAS_SIZE >> MMU_TIC_SHIFT)
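
/*
 * A worked example of the sizes above, assuming KERNBASE3X is 0xF8000000
 * and the shifts that follow from the 7/6/6/13 address split described
 * at the top of this file (TIA=25, TIB=19, TIC=13):
 *
 *	KVAS_SIZE     = 0x100000000 - 0xF8000000 = 0x08000000 (128MB)
 *	KERN_B_TABLES = 0x08000000 >> 25 =     4
 *	KERN_C_TABLES = 0x08000000 >> 19 =   256
 *	NUM_KERN_PTES = 0x08000000 >> 13 = 16384   (16384 * 8KB = 128MB)
 */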
358
359 /*************************** MISCELLANEOUS MACROS *************************/
360 void *pmap_bootstrap_alloc(int);
361
362 static INLINE void *mmu_ptov(paddr_t);
363 static INLINE paddr_t mmu_vtop(void *);
364
365 #if 0
366 static INLINE a_tmgr_t *mmuA2tmgr(mmu_long_dte_t *);
367 #endif /* 0 */
368 static INLINE b_tmgr_t *mmuB2tmgr(mmu_short_dte_t *);
369 static INLINE c_tmgr_t *mmuC2tmgr(mmu_short_pte_t *);
370
371 static INLINE pv_t *pa2pv(paddr_t);
372 static INLINE int pteidx(mmu_short_pte_t *);
373 static INLINE pmap_t current_pmap(void);
374
375 /*
376 * We can always convert between virtual and physical addresses
377 * for anything in the range [KERNBASE ... avail_start] because
378 * that range is GUARANTEED to be mapped linearly.
379 * We rely heavily upon this feature!
380 */
381 static INLINE void *
382 mmu_ptov(paddr_t pa)
383 {
384 vaddr_t va;
385
386 va = (pa + KERNBASE3X);
387 #ifdef PMAP_DEBUG
388 if ((va < KERNBASE3X) || (va >= virtual_contig_end))
389 panic("mmu_ptov");
390 #endif
391 return (void *)va;
392 }
393
394 static INLINE paddr_t
395 mmu_vtop(void *vva)
396 {
397 vaddr_t va;
398
399 va = (vaddr_t)vva;
400 #ifdef PMAP_DEBUG
401 if ((va < KERNBASE3X) || (va >= virtual_contig_end))
402 panic("mmu_vtop");
403 #endif
404 return va - KERNBASE3X;
405 }
406
407 /*
408 * These macros map MMU tables to their corresponding manager structures.
409 * They are needed quite often because many of the pointers in the pmap
410 * system reference MMU tables and not the structures that control them.
411 * There needs to be a way to find one when given the other and these
412 * macros do so by taking advantage of the memory layout described above.
413 * Here's a quick step through the first macro, mmuA2tmgr():
414 *
415 * 1) find the offset of the given MMU A table from the base of its table
416 * pool (table - mmuAbase).
417 * 2) convert this offset into a table index by dividing it by the
418 * size of one MMU 'A' table. (sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE)
419 * 3) use this index to select the corresponding 'A' table manager
420 * structure from the 'A' table manager pool (Atmgrbase[index]).
421 */
422 /* This function is not currently used. */
423 #if 0
424 static INLINE a_tmgr_t *
425 mmuA2tmgr(mmu_long_dte_t *mmuAtbl)
426 {
427 int idx;
428
429 /* Which table is this in? */
430 idx = (mmuAtbl - mmuAbase) / MMU_A_TBL_SIZE;
431 #ifdef PMAP_DEBUG
432 if ((idx < 0) || (idx >= NUM_A_TABLES))
433 panic("mmuA2tmgr");
434 #endif
435 return &Atmgrbase[idx];
436 }
437 #endif /* 0 */
438
439 static INLINE b_tmgr_t *
440 mmuB2tmgr(mmu_short_dte_t *mmuBtbl)
441 {
442 int idx;
443
444 /* Which table is this in? */
445 idx = (mmuBtbl - mmuBbase) / MMU_B_TBL_SIZE;
446 #ifdef PMAP_DEBUG
447 if ((idx < 0) || (idx >= NUM_B_TABLES))
448 panic("mmuB2tmgr");
449 #endif
450 return &Btmgrbase[idx];
451 }
452
453 /* mmuC2tmgr INTERNAL
454 **
455 * Given a pte known to belong to a C table, return the address of
456 * that table's management structure.
457 */
458 static INLINE c_tmgr_t *
459 mmuC2tmgr(mmu_short_pte_t *mmuCtbl)
460 {
461 int idx;
462
463 /* Which table is this in? */
464 idx = (mmuCtbl - mmuCbase) / MMU_C_TBL_SIZE;
465 #ifdef PMAP_DEBUG
466 if ((idx < 0) || (idx >= NUM_C_TABLES))
467 panic("mmuC2tmgr");
468 #endif
469 return &Ctmgrbase[idx];
470 }
471
472 /* This is now a function call below.
473 * #define pa2pv(pa) \
474 * (&pvbase[(unsigned long)\
475 * m68k_btop(pa)\
476 * ])
477 */
478
479 /* pa2pv INTERNAL
480 **
481 * Return the pv_list_head element which manages the given physical
482 * address.
483 */
484 static INLINE pv_t *
485 pa2pv(paddr_t pa)
486 {
487 struct pmap_physmem_struct *bank;
488 int idx;
489
490 bank = &avail_mem[0];
491 while (pa >= bank->pmem_end)
492 bank = bank->pmem_next;
493
494 pa -= bank->pmem_start;
495 idx = bank->pmem_pvbase + m68k_btop(pa);
496 #ifdef PMAP_DEBUG
497 if ((idx < 0) || (idx >= physmem))
498 panic("pa2pv");
499 #endif
500 return &pvbase[idx];
501 }
502
503 /* pteidx INTERNAL
504 **
505 * Return the index of the given PTE within the entire fixed table of
506 * PTEs.
507 */
508 static INLINE int
509 pteidx(mmu_short_pte_t *pte)
510 {
511
512 return pte - kernCbase;
513 }
514
515 /*
516 * This just offers a place to put some debugging checks,
517 * and reduces the number of places "curlwp" appears...
518 */
519 static INLINE pmap_t
520 current_pmap(void)
521 {
522 struct vmspace *vm;
523 struct vm_map *map;
524 pmap_t pmap;
525
526 vm = curproc->p_vmspace;
527 map = &vm->vm_map;
528 pmap = vm_map_pmap(map);
529
530 return pmap;
531 }
532
533
534 /*************************** FUNCTION DEFINITIONS ************************
535 * These appear here merely for the compiler to enforce type checking on *
536 * all function calls. *
537 *************************************************************************/
538
539 /*
540 * Internal functions
541 */
542 a_tmgr_t *get_a_table(void);
543 b_tmgr_t *get_b_table(void);
544 c_tmgr_t *get_c_table(void);
545 int free_a_table(a_tmgr_t *, bool);
546 int free_b_table(b_tmgr_t *, bool);
547 int free_c_table(c_tmgr_t *, bool);
548
549 void pmap_bootstrap_aalign(int);
550 void pmap_alloc_usermmu(void);
551 void pmap_alloc_usertmgr(void);
552 void pmap_alloc_pv(void);
553 void pmap_init_a_tables(void);
554 void pmap_init_b_tables(void);
555 void pmap_init_c_tables(void);
556 void pmap_init_pv(void);
557 void pmap_clear_pv(paddr_t, int);
558 static INLINE bool is_managed(paddr_t);
559
560 bool pmap_remove_a(a_tmgr_t *, vaddr_t, vaddr_t);
561 bool pmap_remove_b(b_tmgr_t *, vaddr_t, vaddr_t);
562 bool pmap_remove_c(c_tmgr_t *, vaddr_t, vaddr_t);
563 void pmap_remove_pte(mmu_short_pte_t *);
564
565 void pmap_enter_kernel(vaddr_t, paddr_t, vm_prot_t);
566 static INLINE void pmap_remove_kernel(vaddr_t, vaddr_t);
567 static INLINE void pmap_protect_kernel(vaddr_t, vaddr_t, vm_prot_t);
568 static INLINE bool pmap_extract_kernel(vaddr_t, paddr_t *);
569 vaddr_t pmap_get_pteinfo(u_int, pmap_t *, c_tmgr_t **);
570 static INLINE int pmap_dereference(pmap_t);
571
572 bool pmap_stroll(pmap_t, vaddr_t, a_tmgr_t **, b_tmgr_t **, c_tmgr_t **,
573 mmu_short_pte_t **, int *, int *, int *);
574 void pmap_bootstrap_copyprom(void);
575 void pmap_takeover_mmu(void);
576 void pmap_bootstrap_setprom(void);
577 static void pmap_page_upload(void);
578
579 #ifdef PMAP_DEBUG
580 /* Debugging function definitions */
581 void pv_list(paddr_t, int);
582 #endif /* PMAP_DEBUG */
583
584 /** Interface functions
585 ** - functions required by the Mach VM Pmap interface, with MACHINE_CONTIG
586 ** defined.
587 ** The new UVM doesn't require them so now INTERNAL.
588 **/
589 static INLINE void pmap_pinit(pmap_t);
590 static INLINE void pmap_release(pmap_t);
591
592 /********************************** CODE ********************************
593 * Functions that are called from other parts of the kernel are labeled *
594 * as 'INTERFACE' functions. Functions that are only called from *
595 * within the pmap module are labeled as 'INTERNAL' functions. *
596 * Functions that are internal, but are not (currently) used at all are *
597 * labeled 'INTERNAL_X'. *
598 ************************************************************************/
599
600 /* pmap_bootstrap INTERNAL
601 **
602 * Initializes the pmap system. Called at boot time from
603 * locore2.c:_vm_init()
604 *
605 * Reminder: having a pmap_bootstrap_alloc() and also having the VM
606 * system implement pmap_steal_memory() is redundant.
607 * Don't release this code without removing one or the other!
608 */
609 void
610 pmap_bootstrap(vaddr_t nextva)
611 {
612 struct physmemory *membank;
613 struct pmap_physmem_struct *pmap_membank;
614 vaddr_t va, eva;
615 paddr_t pa;
616 int b, c, i, j; /* running table counts */
617 int size, resvmem;
618
619 /*
620 * This function is called by __bootstrap after it has
621 * determined the type of machine and made the appropriate
622 * patches to the ROM vectors (XXX- I don't quite know what I meant
623 * by that.) It allocates and sets up enough of the pmap system
624 * to manage the kernel's address space.
625 */
626
627 /*
628 * Determine the range of kernel virtual and physical
629 * space available. Note that we ABSOLUTELY DEPEND on
630 * the fact that the first bank of memory (4MB) is
631 * mapped linearly to KERNBASE (which we guaranteed in
632 * the first instructions of locore.s).
633 * That is plenty for our bootstrap work.
634 */
635 virtual_avail = m68k_round_page(nextva);
636 virtual_contig_end = KERNBASE3X + 0x400000; /* +4MB */
637 virtual_end = VM_MAX_KERNEL_ADDRESS;
638 /* Don't need avail_start til later. */
639
640 /* We may now call pmap_bootstrap_alloc(). */
641 bootstrap_alloc_enabled = true;
642
643 /*
644 * This is a somewhat unwrapped loop to deal with
645 	 * copying the PROM's 'physmem' banks into the pmap's
646 * banks. The following is always assumed:
647 * 1. There is always at least one bank of memory.
648 * 2. There is always a last bank of memory, and its
649 * pmem_next member must be set to NULL.
650 */
651 membank = romVectorPtr->v_physmemory;
652 pmap_membank = avail_mem;
653 total_phys_mem = 0;
654
655 for (;;) { /* break on !membank */
656 pmap_membank->pmem_start = membank->address;
657 pmap_membank->pmem_end = membank->address + membank->size;
658 total_phys_mem += membank->size;
659 membank = membank->next;
660 if (!membank)
661 break;
662 /* This silly syntax arises because pmap_membank
663 * is really a pre-allocated array, but it is put into
664 * use as a linked list.
665 */
666 pmap_membank->pmem_next = pmap_membank + 1;
667 pmap_membank = pmap_membank->pmem_next;
668 }
669 /* This is the last element. */
670 pmap_membank->pmem_next = NULL;
671
672 /*
673 * Note: total_phys_mem, physmem represent
674 * actual physical memory, including that
675 * reserved for the PROM monitor.
676 */
677 physmem = btoc(total_phys_mem);
678
679 /*
680 * Avail_end is set to the first byte of physical memory
681 * after the end of the last bank. We use this only to
682 * determine if a physical address is "managed" memory.
683 * This address range should be reduced to prevent the
684 * physical pages needed by the PROM monitor from being used
685 * in the VM system.
686 */
687 resvmem = total_phys_mem - *(romVectorPtr->memoryAvail);
688 resvmem = m68k_round_page(resvmem);
689 avail_end = pmap_membank->pmem_end - resvmem;
690
691 /*
692 * First allocate enough kernel MMU tables to map all
693 * of kernel virtual space from KERNBASE to 0xFFFFFFFF.
694 * Note: All must be aligned on 256 byte boundaries.
695 * Start with the level-A table (one of those).
696 */
697 size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE;
698 kernAbase = pmap_bootstrap_alloc(size);
699 memset(kernAbase, 0, size);
700
701 /* Now the level-B kernel tables... */
702 size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * KERN_B_TABLES;
703 kernBbase = pmap_bootstrap_alloc(size);
704 memset(kernBbase, 0, size);
705
706 /* Now the level-C kernel tables... */
707 size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * KERN_C_TABLES;
708 kernCbase = pmap_bootstrap_alloc(size);
709 memset(kernCbase, 0, size);
710 /*
711 * Note: In order for the PV system to work correctly, the kernel
712 * and user-level C tables must be allocated contiguously.
713 * Nothing should be allocated between here and the allocation of
714 * mmuCbase below. XXX: Should do this as one allocation, and
715 * then compute a pointer for mmuCbase instead of this...
716 *
717 * Allocate user MMU tables.
718 * These must be contiguous with the preceding.
719 */
720
721 #ifndef FIXED_NTABLES
722 /*
723 * The number of user-level C tables that should be allocated is
724 * related to the size of physical memory. In general, there should
725 * be enough tables to map four times the amount of available RAM.
726 * The extra amount is needed because some table space is wasted by
727 * fragmentation.
728 */
729 NUM_C_TABLES = (total_phys_mem * 4) / (MMU_C_TBL_SIZE * MMU_PAGE_SIZE);
730 NUM_B_TABLES = NUM_C_TABLES / 2;
731 NUM_A_TABLES = NUM_B_TABLES / 2;
732 #endif /* !FIXED_NTABLES */
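
	/*
	 * For example, on a hypothetical 32MB machine: one C table maps
	 * MMU_C_TBL_SIZE * MMU_PAGE_SIZE = 64 * 8KB = 512KB, so the
	 * computation above would yield NUM_C_TABLES = (32MB * 4) / 512KB
	 * = 256, NUM_B_TABLES = 128 and NUM_A_TABLES = 64.
	 */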
733
734 size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * NUM_C_TABLES;
735 mmuCbase = pmap_bootstrap_alloc(size);
736
737 size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * NUM_B_TABLES;
738 mmuBbase = pmap_bootstrap_alloc(size);
739
740 size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE * NUM_A_TABLES;
741 mmuAbase = pmap_bootstrap_alloc(size);
742
743 /*
744 * Fill in the never-changing part of the kernel tables.
745 * For simplicity, the kernel's mappings will be editable as a
746 * flat array of page table entries at kernCbase. The
747 * higher level 'A' and 'B' tables must be initialized to point
748 * to this lower one.
749 */
750 b = c = 0;
751
752 /*
753 * Invalidate all mappings below KERNBASE in the A table.
754 * This area has already been zeroed out, but it is good
755 * practice to explicitly show that we are interpreting
756 * it as a list of A table descriptors.
757 */
758 for (i = 0; i < MMU_TIA(KERNBASE3X); i++) {
759 kernAbase[i].addr.raw = 0;
760 }
761
762 /*
763 * Set up the kernel A and B tables so that they will reference the
764 * correct spots in the contiguous table of PTEs allocated for the
765 * kernel's virtual memory space.
766 */
767 for (i = MMU_TIA(KERNBASE3X); i < MMU_A_TBL_SIZE; i++) {
768 kernAbase[i].attr.raw =
769 MMU_LONG_DTE_LU | MMU_LONG_DTE_SUPV | MMU_DT_SHORT;
770 kernAbase[i].addr.raw = mmu_vtop(&kernBbase[b]);
771
772 for (j = 0; j < MMU_B_TBL_SIZE; j++) {
773 kernBbase[b + j].attr.raw =
774 mmu_vtop(&kernCbase[c]) | MMU_DT_SHORT;
775 c += MMU_C_TBL_SIZE;
776 }
777 b += MMU_B_TBL_SIZE;
778 }
779
780 pmap_alloc_usermmu(); /* Allocate user MMU tables. */
781 pmap_alloc_usertmgr(); /* Allocate user MMU table managers.*/
782 pmap_alloc_pv(); /* Allocate physical->virtual map. */
783
784 /*
785 * We are now done with pmap_bootstrap_alloc(). Round up
786 * `virtual_avail' to the nearest page, and set the flag
787 * to prevent use of pmap_bootstrap_alloc() hereafter.
788 */
789 pmap_bootstrap_aalign(PAGE_SIZE);
790 bootstrap_alloc_enabled = false;
791
792 /*
793 * Now that we are done with pmap_bootstrap_alloc(), we
794 * must save the virtual and physical addresses of the
795 * end of the linearly mapped range, which are stored in
796 * virtual_contig_end and avail_start, respectively.
797 * These variables will never change after this point.
798 */
799 virtual_contig_end = virtual_avail;
800 avail_start = virtual_avail - KERNBASE3X;
801
802 /*
803 * `avail_next' is a running pointer used by pmap_next_page() to
804 * keep track of the next available physical page to be handed
805 * to the VM system during its initialization, in which it
806 * asks for physical pages, one at a time.
807 */
808 avail_next = avail_start;
809
810 /*
811 * Now allocate some virtual addresses, but not the physical pages
812 * behind them. Note that virtual_avail is already page-aligned.
813 *
814 * tmp_vpages[] is an array of two virtual pages used for temporary
815 * kernel mappings in the pmap module to facilitate various physical
816 	 * address-oriented operations.
817 */
818 tmp_vpages[0] = virtual_avail;
819 virtual_avail += PAGE_SIZE;
820 tmp_vpages[1] = virtual_avail;
821 virtual_avail += PAGE_SIZE;
822
823 /** Initialize the PV system **/
824 pmap_init_pv();
825
826 /*
827 * Fill in the kernel_pmap structure and kernel_crp.
828 */
829 kernAphys = mmu_vtop(kernAbase);
830 kernel_pmap.pm_a_tmgr = NULL;
831 kernel_pmap.pm_a_phys = kernAphys;
832 kernel_pmap.pm_refcount = 1; /* always in use */
833
834 kernel_crp.rp_attr = MMU_LONG_DTE_LU | MMU_DT_LONG;
835 kernel_crp.rp_addr = kernAphys;
836
837 /*
838 * Now pmap_enter_kernel() may be used safely and will be
839 * the main interface used hereafter to modify the kernel's
840 * virtual address space. Note that since we are still running
841 * under the PROM's address table, none of these table modifications
842 * actually take effect until pmap_takeover_mmu() is called.
843 *
844 * Note: Our tables do NOT have the PROM linear mappings!
845 * Only the mappings created here exist in our tables, so
846 * remember to map anything we expect to use.
847 */
848 va = (vaddr_t)KERNBASE3X;
849 pa = 0;
850
851 /*
852 * The first page of the kernel virtual address space is the msgbuf
853 * page. The page attributes (data, non-cached) are set here, while
854 * the address is assigned to this global pointer in cpu_startup().
855 * It is non-cached, mostly due to paranoia.
856 */
857 pmap_enter_kernel(va, pa|PMAP_NC, VM_PROT_ALL);
858 va += PAGE_SIZE;
859 pa += PAGE_SIZE;
860
861 /* Next page is used as the temporary stack. */
862 pmap_enter_kernel(va, pa, VM_PROT_ALL);
863 va += PAGE_SIZE;
864 pa += PAGE_SIZE;
865
866 /*
867 * Map all of the kernel's text segment as read-only and cacheable.
868 * (Cacheable is implied by default). Unfortunately, the last bytes
869 * of kernel text and the first bytes of kernel data will often be
870 * sharing the same page. Therefore, the last page of kernel text
871 * has to be mapped as read/write, to accommodate the data.
872 */
873 eva = m68k_trunc_page((vaddr_t)etext);
874 for (; va < eva; va += PAGE_SIZE, pa += PAGE_SIZE)
875 pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_EXECUTE);
876
877 /*
878 * Map all of the kernel's data as read/write and cacheable.
879 * This includes: data, BSS, symbols, and everything in the
880 * contiguous memory used by pmap_bootstrap_alloc()
881 */
882 for (; pa < avail_start; va += PAGE_SIZE, pa += PAGE_SIZE)
883 pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_WRITE);
884
885 /*
886 * At this point we are almost ready to take over the MMU. But first
887 * we must save the PROM's address space in our map, as we call its
888 * routines and make references to its data later in the kernel.
889 */
890 pmap_bootstrap_copyprom();
891 pmap_takeover_mmu();
892 pmap_bootstrap_setprom();
893
894 /* Notify the VM system of our page size. */
895 uvmexp.pagesize = PAGE_SIZE;
896 uvm_md_init();
897
898 pmap_page_upload();
899 }
900
901
902 /* pmap_alloc_usermmu INTERNAL
903 **
904 * Called from pmap_bootstrap() to allocate MMU tables that will
905 * eventually be used for user mappings.
906 */
907 void
908 pmap_alloc_usermmu(void)
909 {
910
911 /* XXX: Moved into caller. */
912 }
913
914 /* pmap_alloc_pv INTERNAL
915 **
916 * Called from pmap_bootstrap() to allocate the physical
917 * to virtual mapping list. Each physical page of memory
918 * in the system has a corresponding element in this list.
919 */
920 void
921 pmap_alloc_pv(void)
922 {
923 int i;
924 unsigned int total_mem;
925
926 /*
927 * Allocate a pv_head structure for every page of physical
928 * memory that will be managed by the system. Since memory on
929 * the 3/80 is non-contiguous, we cannot arrive at a total page
930 * count by subtraction of the lowest available address from the
931 * highest, but rather we have to step through each memory
932 * bank and add the number of pages in each to the total.
933 *
934 * At this time we also initialize the offset of each bank's
935 * starting pv_head within the pv_head list so that the physical
936 * memory state routines (pmap_is_referenced(),
937 * pmap_is_modified(), et al.) can quickly find corresponding
938 * pv_heads in spite of the non-contiguity.
939 */
940 total_mem = 0;
941 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
942 avail_mem[i].pmem_pvbase = m68k_btop(total_mem);
943 total_mem += avail_mem[i].pmem_end - avail_mem[i].pmem_start;
944 if (avail_mem[i].pmem_next == NULL)
945 break;
946 }
947 pvbase = (pv_t *)pmap_bootstrap_alloc(sizeof(pv_t) *
948 m68k_btop(total_phys_mem));
949 }
950
951 /* pmap_alloc_usertmgr INTERNAL
952 **
953 * Called from pmap_bootstrap() to allocate the structures which
954 * facilitate management of user MMU tables. Each user MMU table
955 * in the system has one such structure associated with it.
956 */
957 void
958 pmap_alloc_usertmgr(void)
959 {
960 /* Allocate user MMU table managers */
961 /* It would be a lot simpler to just make these BSS, but */
962 /* we may want to change their size at boot time... -j */
963 Atmgrbase =
964 (a_tmgr_t *)pmap_bootstrap_alloc(sizeof(a_tmgr_t) * NUM_A_TABLES);
965 Btmgrbase =
966 (b_tmgr_t *)pmap_bootstrap_alloc(sizeof(b_tmgr_t) * NUM_B_TABLES);
967 Ctmgrbase =
968 (c_tmgr_t *)pmap_bootstrap_alloc(sizeof(c_tmgr_t) * NUM_C_TABLES);
969
970 /*
971 * Allocate PV list elements for the physical to virtual
972 * mapping system.
973 */
974 pvebase = (pv_elem_t *)pmap_bootstrap_alloc(sizeof(pv_elem_t) *
975 (NUM_USER_PTES + NUM_KERN_PTES));
976 }
977
978 /* pmap_bootstrap_copyprom() INTERNAL
979 **
980 * Copy the PROM mappings into our own tables. Note, we
981 * can use physical addresses until __bootstrap returns.
982 */
983 void
984 pmap_bootstrap_copyprom(void)
985 {
986 struct sunromvec *romp;
987 int *mon_ctbl;
988 mmu_short_pte_t *kpte;
989 int i, len;
990
991 romp = romVectorPtr;
992
993 /*
994 * Copy the mappings in SUN3X_MON_KDB_BASE...SUN3X_MONEND
995 * Note: mon_ctbl[0] maps SUN3X_MON_KDB_BASE
996 */
997 mon_ctbl = *romp->monptaddr;
998 i = m68k_btop(SUN3X_MON_KDB_BASE - KERNBASE3X);
999 kpte = &kernCbase[i];
1000 len = m68k_btop(SUN3X_MONEND - SUN3X_MON_KDB_BASE);
1001
1002 for (i = 0; i < len; i++) {
1003 kpte[i].attr.raw = mon_ctbl[i];
1004 }
1005
1006 /*
1007 * Copy the mappings at MON_DVMA_BASE (to the end).
1008 * Note, in here, mon_ctbl[0] maps MON_DVMA_BASE.
1009 * Actually, we only want the last page, which the
1010 * PROM has set up for use by the "ie" driver.
1011 	 * (The i82586 needs its SCP there.)
1012 * If we copy all the mappings, pmap_enter_kernel
1013 * may complain about finding valid PTEs that are
1014 * not recorded in our PV lists...
1015 */
1016 mon_ctbl = *romp->shadowpteaddr;
1017 i = m68k_btop(SUN3X_MON_DVMA_BASE - KERNBASE3X);
1018 kpte = &kernCbase[i];
1019 len = m68k_btop(SUN3X_MON_DVMA_SIZE);
1020 for (i = (len - 1); i < len; i++) {
1021 kpte[i].attr.raw = mon_ctbl[i];
1022 }
1023 }
1024
1025 /* pmap_takeover_mmu INTERNAL
1026 **
1027 * Called from pmap_bootstrap() after it has copied enough of the
1028 * PROM mappings into the kernel map so that we can use our own
1029 * MMU table.
1030 */
1031 void
1032 pmap_takeover_mmu(void)
1033 {
1034
1035 loadcrp(&kernel_crp);
1036 }
1037
1038 /* pmap_bootstrap_setprom() INTERNAL
1039 **
1040 * Set the PROM mappings so it can see kernel space.
1041 * Note that physical addresses are used here, which
1042 * we can get away with because this runs with the
1043 * low 1GB set for transparent translation.
1044 */
1045 void
1046 pmap_bootstrap_setprom(void)
1047 {
1048 mmu_long_dte_t *mon_dte;
1049 extern struct mmu_rootptr mon_crp;
1050 int i;
1051
1052 mon_dte = (mmu_long_dte_t *)mon_crp.rp_addr;
1053 for (i = MMU_TIA(KERNBASE3X); i < MMU_TIA(KERN_END3X); i++) {
1054 mon_dte[i].attr.raw = kernAbase[i].attr.raw;
1055 mon_dte[i].addr.raw = kernAbase[i].addr.raw;
1056 }
1057 }
1058
1059
1060 /* pmap_init INTERFACE
1061 **
1062 * Called at the end of vm_init() to set up the pmap system to go
1063 * into full time operation. All initialization of kernel_pmap
1064 * should be already done by now, so this should just do things
1065 * needed for user-level pmaps to work.
1066 */
1067 void
1068 pmap_init(void)
1069 {
1070
1071 /** Initialize the manager pools **/
1072 TAILQ_INIT(&a_pool);
1073 TAILQ_INIT(&b_pool);
1074 TAILQ_INIT(&c_pool);
1075
1076 /**************************************************************
1077 * Initialize all tmgr structures and MMU tables they manage. *
1078 **************************************************************/
1079 /** Initialize A tables **/
1080 pmap_init_a_tables();
1081 /** Initialize B tables **/
1082 pmap_init_b_tables();
1083 /** Initialize C tables **/
1084 pmap_init_c_tables();
1085
1086 /** Initialize the pmap pools **/
1087 pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
1088 &pool_allocator_nointr, IPL_NONE);
1089 }
1090
1091 /* pmap_init_a_tables() INTERNAL
1092 **
1093 * Initializes all A managers, their MMU A tables, and inserts
1094 * them into the A manager pool for use by the system.
1095 */
1096 void
1097 pmap_init_a_tables(void)
1098 {
1099 int i;
1100 a_tmgr_t *a_tbl;
1101
1102 for (i = 0; i < NUM_A_TABLES; i++) {
1103 /* Select the next available A manager from the pool */
1104 a_tbl = &Atmgrbase[i];
1105
1106 /*
1107 * Clear its parent entry. Set its wired and valid
1108 * entry count to zero.
1109 */
1110 a_tbl->at_parent = NULL;
1111 a_tbl->at_wcnt = a_tbl->at_ecnt = 0;
1112
1113 /* Assign it the next available MMU A table from the pool */
1114 a_tbl->at_dtbl = &mmuAbase[i * MMU_A_TBL_SIZE];
1115
1116 /*
1117 * Initialize the MMU A table with the table in the `lwp0',
1118 * or kernel, mapping. This ensures that every process has
1119 * the kernel mapped in the top part of its address space.
1120 */
1121 memcpy(a_tbl->at_dtbl, kernAbase,
1122 MMU_A_TBL_SIZE * sizeof(mmu_long_dte_t));
1123
1124 /*
1125 * Finally, insert the manager into the A pool,
1126 * making it ready to be used by the system.
1127 */
1128 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
1129 }
1130 }
1131
1132 /* pmap_init_b_tables() INTERNAL
1133 **
1134 * Initializes all B table managers, their MMU B tables, and
1135 * inserts them into the B manager pool for use by the system.
1136 */
1137 void
1138 pmap_init_b_tables(void)
1139 {
1140 int i, j;
1141 b_tmgr_t *b_tbl;
1142
1143 for (i = 0; i < NUM_B_TABLES; i++) {
1144 /* Select the next available B manager from the pool */
1145 b_tbl = &Btmgrbase[i];
1146
1147 b_tbl->bt_parent = NULL; /* clear its parent, */
1148 b_tbl->bt_pidx = 0; /* parent index, */
1149 b_tbl->bt_wcnt = 0; /* wired entry count, */
1150 b_tbl->bt_ecnt = 0; /* valid entry count. */
1151
1152 /* Assign it the next available MMU B table from the pool */
1153 b_tbl->bt_dtbl = &mmuBbase[i * MMU_B_TBL_SIZE];
1154
1155 /* Invalidate every descriptor in the table */
1156 for (j = 0; j < MMU_B_TBL_SIZE; j++)
1157 b_tbl->bt_dtbl[j].attr.raw = MMU_DT_INVALID;
1158
1159 /* Insert the manager into the B pool */
1160 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
1161 }
1162 }
1163
1164 /* pmap_init_c_tables() INTERNAL
1165 **
1166 * Initializes all C table managers, their MMU C tables, and
1167 * inserts them into the C manager pool for use by the system.
1168 */
1169 void
1170 pmap_init_c_tables(void)
1171 {
1172 int i, j;
1173 c_tmgr_t *c_tbl;
1174
1175 for (i = 0; i < NUM_C_TABLES; i++) {
1176 /* Select the next available C manager from the pool */
1177 c_tbl = &Ctmgrbase[i];
1178
1179 c_tbl->ct_parent = NULL; /* clear its parent, */
1180 c_tbl->ct_pidx = 0; /* parent index, */
1181 c_tbl->ct_wcnt = 0; /* wired entry count, */
1182 c_tbl->ct_ecnt = 0; /* valid entry count, */
1183 c_tbl->ct_pmap = NULL; /* parent pmap, */
1184 c_tbl->ct_va = 0; /* base of managed range */
1185
1186 /* Assign it the next available MMU C table from the pool */
1187 c_tbl->ct_dtbl = &mmuCbase[i * MMU_C_TBL_SIZE];
1188
1189 for (j = 0; j < MMU_C_TBL_SIZE; j++)
1190 c_tbl->ct_dtbl[j].attr.raw = MMU_DT_INVALID;
1191
1192 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
1193 }
1194 }
1195
1196 /* pmap_init_pv() INTERNAL
1197 **
1198 * Initializes the Physical to Virtual mapping system.
1199 */
1200 void
1201 pmap_init_pv(void)
1202 {
1203 int i;
1204
1205 /* Initialize every PV head. */
1206 for (i = 0; i < m68k_btop(total_phys_mem); i++) {
1207 pvbase[i].pv_idx = PVE_EOL; /* Indicate no mappings */
1208 pvbase[i].pv_flags = 0; /* Zero out page flags */
1209 }
1210 }
1211
1212 /* is_managed INTERNAL
1213 **
1214 * Determine if the given physical address is managed by the PV system.
1215 * Note that this logic assumes that no one will ask for the status of
1216 * addresses which lie in-between the memory banks on the 3/80. If they
1217 * do so, it will falsely report that it is managed.
1218 *
1219 * Note: A "managed" address is one that was reported to the VM system as
1220 * a "usable page" during system startup. As such, the VM system expects the
1221 * pmap module to keep an accurate track of the usage of those pages.
1222 * Any page not given to the VM system at startup does not exist (as far as
1223 * the VM system is concerned) and is therefore "unmanaged." Examples are
1224 * those pages which belong to the ROM monitor and the memory allocated before
1225 * the VM system was started.
1226 */
1227 static INLINE bool
1228 is_managed(paddr_t pa)
1229 {
1230 if (pa >= avail_start && pa < avail_end)
1231 return true;
1232 else
1233 return false;
1234 }
1235
1236 /* get_a_table INTERNAL
1237 **
1238 * Retrieve and return a level A table for use in a user map.
1239 */
1240 a_tmgr_t *
1241 get_a_table(void)
1242 {
1243 a_tmgr_t *tbl;
1244 pmap_t pmap;
1245
1246 /* Get the top A table in the pool */
1247 tbl = TAILQ_FIRST(&a_pool);
1248 if (tbl == NULL) {
1249 /*
1250 * XXX - Instead of panicking here and in other get_x_table
1251 * functions, we do have the option of sleeping on the head of
1252 * the table pool. Any function which updates the table pool
1253 * would then issue a wakeup() on the head, thus waking up any
1254 * processes waiting for a table.
1255 *
1256 * Actually, the place to sleep would be when some process
1257 * asks for a "wired" mapping that would run us short of
1258 * mapping resources. This design DEPENDS on always having
1259 * some mapping resources in the pool for stealing, so we
1260 * must make sure we NEVER let the pool become empty. -gwr
1261 */
1262 panic("get_a_table: out of A tables.");
1263 }
1264
1265 TAILQ_REMOVE(&a_pool, tbl, at_link);
1266 /*
1267 * If the table has a non-null parent pointer then it is in use.
1268 * Forcibly abduct it from its parent and clear its entries.
1269 * No re-entrancy worries here. This table would not be in the
1270 * table pool unless it was available for use.
1271 *
1272 * Note that the second argument to free_a_table() is false. This
1273 * indicates that the table should not be relinked into the A table
1274 * pool. That is a job for the function that called us.
1275 */
1276 if (tbl->at_parent) {
1277 KASSERT(tbl->at_wcnt == 0);
1278 pmap = tbl->at_parent;
1279 free_a_table(tbl, false);
1280 pmap->pm_a_tmgr = NULL;
1281 pmap->pm_a_phys = kernAphys;
1282 }
1283 return tbl;
1284 }
1285
1286 /* get_b_table INTERNAL
1287 **
1288 * Return a level B table for use.
1289 */
1290 b_tmgr_t *
1291 get_b_table(void)
1292 {
1293 b_tmgr_t *tbl;
1294
1295 /* See 'get_a_table' for comments. */
1296 tbl = TAILQ_FIRST(&b_pool);
1297 if (tbl == NULL)
1298 panic("get_b_table: out of B tables.");
1299 TAILQ_REMOVE(&b_pool, tbl, bt_link);
1300 if (tbl->bt_parent) {
1301 KASSERT(tbl->bt_wcnt == 0);
1302 tbl->bt_parent->at_dtbl[tbl->bt_pidx].attr.raw = MMU_DT_INVALID;
1303 tbl->bt_parent->at_ecnt--;
1304 free_b_table(tbl, false);
1305 }
1306 return tbl;
1307 }
1308
1309 /* get_c_table INTERNAL
1310 **
1311 * Return a level C table for use.
1312 */
1313 c_tmgr_t *
1314 get_c_table(void)
1315 {
1316 c_tmgr_t *tbl;
1317
1318 /* See 'get_a_table' for comments */
1319 tbl = TAILQ_FIRST(&c_pool);
1320 if (tbl == NULL)
1321 panic("get_c_table: out of C tables.");
1322 TAILQ_REMOVE(&c_pool, tbl, ct_link);
1323 if (tbl->ct_parent) {
1324 KASSERT(tbl->ct_wcnt == 0);
1325 tbl->ct_parent->bt_dtbl[tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
1326 tbl->ct_parent->bt_ecnt--;
1327 free_c_table(tbl, false);
1328 }
1329 return tbl;
1330 }
1331
1332 /*
1333 * The following 'free_table' and 'steal_table' functions are called to
1334 * detach tables from their current obligations (parents and children) and
1335 * prepare them for reuse in another mapping.
1336 *
1337 * Free_table is used when the calling function will handle the fate
1338 * of the parent table, such as returning it to the free pool when it has
1339 * no valid entries. Functions that do not want to handle this should
1340 * call steal_table, in which the parent table's descriptors and entry
1341 * count are automatically modified when this table is removed.
1342 */
1343
1344 /* free_a_table INTERNAL
1345 **
1346 * Unmaps the given A table and all child tables from their current
1347 * mappings. Returns the number of pages that were invalidated.
1348 * If 'relink' is true, the function will return the table to the head
1349 * of the available table pool.
1350 *
1351 * Cache note: The MC68851 will automatically flush all
1352 * descriptors derived from a given A table from its
1353 * Automatic Translation Cache (ATC) if we issue a
1354 * 'PFLUSHR' instruction with the base address of the
1355 * table. This function should do, and does so.
1356 * Note note: We are using an MC68030 - there is no
1357 * PFLUSHR.
1358 */
1359 int
1360 free_a_table(a_tmgr_t *a_tbl, bool relink)
1361 {
1362 int i, removed_cnt;
1363 mmu_long_dte_t *dte;
1364 mmu_short_dte_t *dtbl;
1365 b_tmgr_t *b_tbl;
1366 uint8_t at_wired, bt_wired;
1367
1368 /*
1369 * Flush the ATC cache of all cached descriptors derived
1370 * from this table.
1371 * Sun3x does not use 68851's cached table feature
1372 * flush_atc_crp(mmu_vtop(a_tbl->dte));
1373 */
1374
1375 /*
1376 * Remove any pending cache flushes that were designated
1377 * for the pmap this A table belongs to.
1378 * a_tbl->parent->atc_flushq[0] = 0;
1379 * Not implemented in sun3x.
1380 */
1381
1382 /*
1383 * All A tables in the system should retain a map for the
1384 * kernel. If the table contains any valid descriptors
1385 * (other than those for the kernel area), invalidate them all,
1386 * stopping short of the kernel's entries.
1387 */
1388 removed_cnt = 0;
1389 at_wired = a_tbl->at_wcnt;
1390 if (a_tbl->at_ecnt) {
1391 dte = a_tbl->at_dtbl;
1392 for (i = 0; i < MMU_TIA(KERNBASE3X); i++) {
1393 /*
1394 * If a table entry points to a valid B table, free
1395 * it and its children.
1396 */
1397 if (MMU_VALID_DT(dte[i])) {
1398 /*
1399 * The following block does several things,
1400 * from innermost expression to the
1401 * outermost:
1402 			 * 1) It extracts the base
1403 * address of the B table pointed
1404 * to in the A table entry dte[i].
1405 * 2) It converts this base address into
1406 * the virtual address it can be
1407 * accessed with. (all MMU tables point
1408 * to physical addresses.)
1409 * 3) It finds the corresponding manager
1410 * structure which manages this MMU table.
1411 * 4) It frees the manager structure.
1412 * (This frees the MMU table and all
1413 * child tables. See 'free_b_table' for
1414 * details.)
1415 */
1416 dtbl = mmu_ptov(dte[i].addr.raw);
1417 b_tbl = mmuB2tmgr(dtbl);
1418 bt_wired = b_tbl->bt_wcnt;
1419 removed_cnt += free_b_table(b_tbl, true);
1420 if (bt_wired)
1421 a_tbl->at_wcnt--;
1422 dte[i].attr.raw = MMU_DT_INVALID;
1423 }
1424 }
1425 a_tbl->at_ecnt = 0;
1426 }
1427 KASSERT(a_tbl->at_wcnt == 0);
1428
1429 if (relink) {
1430 a_tbl->at_parent = NULL;
1431 if (!at_wired)
1432 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
1433 TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
1434 }
1435 return removed_cnt;
1436 }
1437
1438 /* free_b_table INTERNAL
1439 **
1440 * Unmaps the given B table and all its children from their current
1441 * mappings. Returns the number of pages that were invalidated.
1442 * (For comments, see 'free_a_table()').
1443 */
1444 int
1445 free_b_table(b_tmgr_t *b_tbl, bool relink)
1446 {
1447 int i, removed_cnt;
1448 mmu_short_dte_t *dte;
1449 mmu_short_pte_t *dtbl;
1450 c_tmgr_t *c_tbl;
1451 uint8_t bt_wired, ct_wired;
1452
1453 removed_cnt = 0;
1454 bt_wired = b_tbl->bt_wcnt;
1455 if (b_tbl->bt_ecnt) {
1456 dte = b_tbl->bt_dtbl;
1457 for (i = 0; i < MMU_B_TBL_SIZE; i++) {
1458 if (MMU_VALID_DT(dte[i])) {
1459 dtbl = mmu_ptov(MMU_DTE_PA(dte[i]));
1460 c_tbl = mmuC2tmgr(dtbl);
1461 ct_wired = c_tbl->ct_wcnt;
1462 removed_cnt += free_c_table(c_tbl, true);
1463 if (ct_wired)
1464 b_tbl->bt_wcnt--;
1465 dte[i].attr.raw = MMU_DT_INVALID;
1466 }
1467 }
1468 b_tbl->bt_ecnt = 0;
1469 }
1470 KASSERT(b_tbl->bt_wcnt == 0);
1471
1472 if (relink) {
1473 b_tbl->bt_parent = NULL;
1474 if (!bt_wired)
1475 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
1476 TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
1477 }
1478 return removed_cnt;
1479 }
1480
1481 /* free_c_table INTERNAL
1482 **
1483 * Unmaps the given C table from use and returns it to the pool for
1484 * re-use. Returns the number of pages that were invalidated.
1485 *
1486 * This function preserves any physical page modification information
1487 * contained in the page descriptors within the C table by calling
1488 * 'pmap_remove_pte().'
1489 */
1490 int
1491 free_c_table(c_tmgr_t *c_tbl, bool relink)
1492 {
1493 mmu_short_pte_t *c_pte;
1494 int i, removed_cnt;
1495 uint8_t ct_wired;
1496
1497 removed_cnt = 0;
1498 ct_wired = c_tbl->ct_wcnt;
1499 if (c_tbl->ct_ecnt) {
1500 for (i = 0; i < MMU_C_TBL_SIZE; i++) {
1501 c_pte = &c_tbl->ct_dtbl[i];
1502 if (MMU_VALID_DT(*c_pte)) {
1503 if (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)
1504 c_tbl->ct_wcnt--;
1505 pmap_remove_pte(c_pte);
1506 removed_cnt++;
1507 }
1508 }
1509 c_tbl->ct_ecnt = 0;
1510 }
1511 KASSERT(c_tbl->ct_wcnt == 0);
1512
1513 if (relink) {
1514 c_tbl->ct_parent = NULL;
1515 if (!ct_wired)
1516 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1517 TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
1518 }
1519 return removed_cnt;
1520 }
1521
1522
1523 /* pmap_remove_pte INTERNAL
1524 **
1525 * Unmap the given pte and preserve any page modification
1526 * information by transferring it to the pv head of the
1527 * physical page it maps to. This function does not update
1528 * any reference counts because it is assumed that the calling
1529 * function will do so.
1530 */
1531 void
1532 pmap_remove_pte(mmu_short_pte_t *pte)
1533 {
1534 u_short pv_idx, targ_idx;
1535 paddr_t pa;
1536 pv_t *pv;
1537
1538 pa = MMU_PTE_PA(*pte);
1539 if (is_managed(pa)) {
1540 pv = pa2pv(pa);
1541 targ_idx = pteidx(pte); /* Index of PTE being removed */
1542
1543 /*
1544 * If the PTE being removed is the first (or only) PTE in
1545 * the list of PTEs currently mapped to this page, remove the
1546 * PTE by changing the index found on the PV head. Otherwise
1547 * a linear search through the list will have to be executed
1548 * in order to find the PVE which points to the PTE being
1549 * removed, so that it may be modified to point to its new
1550 * neighbor.
1551 */
1552
1553 pv_idx = pv->pv_idx; /* Index of first PTE in PV list */
1554 if (pv_idx == targ_idx) {
1555 pv->pv_idx = pvebase[targ_idx].pve_next;
1556 } else {
1557
1558 /*
1559 * Find the PV element pointing to the target
1560 * element. Note: may have pv_idx==PVE_EOL
1561 */
1562
1563 for (;;) {
1564 if (pv_idx == PVE_EOL) {
1565 goto pv_not_found;
1566 }
1567 if (pvebase[pv_idx].pve_next == targ_idx)
1568 break;
1569 pv_idx = pvebase[pv_idx].pve_next;
1570 }
1571
1572 /*
1573 * At this point, pv_idx is the index of the PV
1574 * element just before the target element in the list.
1575 * Unlink the target.
1576 */
1577
1578 pvebase[pv_idx].pve_next = pvebase[targ_idx].pve_next;
1579 }
1580
1581 /*
1582 * Save the mod/ref bits of the pte by simply
1583 * ORing the entire pte onto the pv_flags member
1584 * of the pv structure.
1585 * There is no need to use a separate bit pattern
1586 * for usage information on the pv head than that
1587 * which is used on the MMU ptes.
1588 */
1589
1590 pv_not_found:
1591 pv->pv_flags |= (u_short) pte->attr.raw;
1592 }
1593 pte->attr.raw = MMU_DT_INVALID;
1594 }
1595
1596 /* pmap_stroll INTERNAL
1597 **
1598 * Retrieve the addresses of all table managers involved in the mapping of
1599 * the given virtual address. If the table walk completed successfully,
1600 * return true. If it was only partially successful, return false.
1601 * The table walk performed by this function is important to many other
1602 * functions in this module.
1603 *
1604 * Note: This function ought to be easier to read.
1605 */
1606 bool
1607 pmap_stroll(pmap_t pmap, vaddr_t va, a_tmgr_t **a_tbl, b_tmgr_t **b_tbl,
1608 c_tmgr_t **c_tbl, mmu_short_pte_t **pte, int *a_idx, int *b_idx,
1609 int *pte_idx)
1610 {
1611 mmu_long_dte_t *a_dte; /* A: long descriptor table */
1612 mmu_short_dte_t *b_dte; /* B: short descriptor table */
1613
1614 if (pmap == pmap_kernel())
1615 return false;
1616
1617 /* Does the given pmap have its own A table? */
1618 *a_tbl = pmap->pm_a_tmgr;
1619 if (*a_tbl == NULL)
1620 return false; /* No. Return unknown. */
1621 /* Does the A table have a valid B table
1622 * under the corresponding table entry?
1623 */
1624 *a_idx = MMU_TIA(va);
1625 a_dte = &((*a_tbl)->at_dtbl[*a_idx]);
1626 if (!MMU_VALID_DT(*a_dte))
1627 return false; /* No. Return unknown. */
1628 /* Yes. Extract B table from the A table. */
1629 *b_tbl = mmuB2tmgr(mmu_ptov(a_dte->addr.raw));
1630 /*
1631 * Does the B table have a valid C table
1632 * under the corresponding table entry?
1633 */
1634 *b_idx = MMU_TIB(va);
1635 b_dte = &((*b_tbl)->bt_dtbl[*b_idx]);
1636 if (!MMU_VALID_DT(*b_dte))
1637 return false; /* No. Return unknown. */
1638 /* Yes. Extract C table from the B table. */
1639 *c_tbl = mmuC2tmgr(mmu_ptov(MMU_DTE_PA(*b_dte)));
1640 *pte_idx = MMU_TIC(va);
1641 *pte = &((*c_tbl)->ct_dtbl[*pte_idx]);
1642
1643 return true;
1644 }
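
/*
 * Example: the usual way a caller uses pmap_stroll() to reach the PTE for
 * a user virtual address; pmap_unwire() and pmap_extract() below follow
 * this same pattern.  Sketch only; example_is_wired() is a hypothetical
 * helper, not part of this file:
 */
#if 0	/* illustrative sketch only */
static bool
example_is_wired(pmap_t pmap, vaddr_t va)
{
	a_tmgr_t *a_tbl;
	b_tmgr_t *b_tbl;
	c_tmgr_t *c_tbl;
	mmu_short_pte_t *pte;
	int a_idx, b_idx, pte_idx;

	/* A partial walk means no PTE exists for 'va' in this pmap. */
	if (!pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte,
	    &a_idx, &b_idx, &pte_idx))
		return false;

	return (pte->attr.raw & MMU_SHORT_PTE_WIRED) != 0;
}
#endif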
1645
1646 /* pmap_enter INTERFACE
1647 **
1648 * Called by the kernel to map a virtual address
1649 * to a physical address in the given process map.
1650 *
1651 * Note: this function should apply an exclusive lock
1652 * on the pmap system for its duration. (it certainly
1653 * would save my hair!!)
1654 * This function ought to be easier to read.
1655 */
1656 int
1657 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1658 {
1659 bool insert, managed; /* Marks the need for PV insertion.*/
1660 u_short nidx; /* PV list index */
1661 int mapflags; /* Flags for the mapping (see NOTE1) */
1662 u_int a_idx, b_idx, pte_idx; /* table indices */
1663 a_tmgr_t *a_tbl; /* A: long descriptor table manager */
1664 b_tmgr_t *b_tbl; /* B: short descriptor table manager */
1665 c_tmgr_t *c_tbl; /* C: short page table manager */
1666 mmu_long_dte_t *a_dte; /* A: long descriptor table */
1667 mmu_short_dte_t *b_dte; /* B: short descriptor table */
1668 mmu_short_pte_t *c_pte; /* C: short page descriptor table */
1669 pv_t *pv; /* pv list head */
1670 bool wired; /* is the mapping to be wired? */
1671 enum {NONE, NEWA, NEWB, NEWC} llevel; /* used at end */
1672
1673 if (pmap == pmap_kernel()) {
1674 pmap_enter_kernel(va, pa, prot);
1675 return 0;
1676 }
1677
1678 /*
1679 * Determine if the mapping should be wired.
1680 */
1681 wired = ((flags & PMAP_WIRED) != 0);
1682
1683 /*
1684 * NOTE1:
1685 *
1686 * On November 13, 1999, someone changed the pmap_enter() API such
1687 * that it now accepts a 'flags' argument. This new argument
1688 * contains bit-flags for the architecture-independent (UVM) system to
1689 * use in signalling certain mapping requirements to the architecture-
1690 * dependent (pmap) system. The argument it replaces, 'wired', is now
1691 * one of the flags within it.
1692 *
1693 * In addition to flags signaled by the architecture-independent
1694 * system, parts of the architecture-dependent section of the sun3x
1695 * kernel pass their own flags in the lower, unused bits of the
1696 * physical address supplied to this function. These flags are
1697 * extracted and stored in the temporary variable 'mapflags'.
1698 *
1699 * Extract sun3x specific flags from the physical address.
1700 */
1701 mapflags = (pa & ~MMU_PAGE_MASK);
1702 pa &= MMU_PAGE_MASK;
1703
1704 /*
1705 * Determine if the physical address being mapped is on-board RAM.
1706 * Any other area of the address space is likely to belong to a
1707 * device and hence it would be disastrous to cache its contents.
1708 */
1709 if ((managed = is_managed(pa)) == false)
1710 mapflags |= PMAP_NC;
1711
1712 /*
1713 * For user mappings we walk along the MMU tables of the given
1714 * pmap, reaching a PTE which describes the virtual page being
1715 * mapped or changed. If any level of the walk ends in an invalid
1716 * entry, a table must be allocated and the entry must be updated
1717 * to point to it.
1718 * There is a bit of confusion as to whether this code must be
1719 * re-entrant. For now we will assume it is. To support
1720 * re-entrancy we must unlink tables from the table pool before
1721 * we assume we may use them. Tables are re-linked into the pool
1722 * when we are finished with them at the end of the function.
1723 * But I don't feel like doing that until we have proof that this
1724 * needs to be re-entrant.
1725 * 'llevel' records which tables need to be relinked.
1726 */
1727 llevel = NONE;
1728
1729 /*
1730 * Step 1 - Retrieve the A table from the pmap. If it has no
1731 * A table, allocate a new one from the available pool.
1732 */
1733
1734 a_tbl = pmap->pm_a_tmgr;
1735 if (a_tbl == NULL) {
1736 /*
1737 * This pmap does not currently have an A table. Allocate
1738 * a new one.
1739 */
1740 a_tbl = get_a_table();
1741 a_tbl->at_parent = pmap;
1742
1743 /*
1744 * Assign this new A table to the pmap, and calculate its
1745 * physical address so that loadcrp() can be used to make
1746 * the table active.
1747 */
1748 pmap->pm_a_tmgr = a_tbl;
1749 pmap->pm_a_phys = mmu_vtop(a_tbl->at_dtbl);
1750
1751 /*
1752 * If the process receiving a new A table is the current
1753 * process, we are responsible for setting the MMU so that
1754 * it becomes the current address space. This only adds
1755 * new mappings, so no need to flush anything.
1756 */
1757 if (pmap == current_pmap()) {
1758 kernel_crp.rp_addr = pmap->pm_a_phys;
1759 loadcrp(&kernel_crp);
1760 }
1761
1762 if (!wired)
1763 llevel = NEWA;
1764 } else {
1765 /*
1766 * Use the A table already allocated for this pmap.
1767 * Unlink it from the A table pool if necessary.
1768 */
1769 if (wired && !a_tbl->at_wcnt)
1770 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
1771 }
1772
1773 /*
1774 * Step 2 - Walk into the B table. If there is no valid B table,
1775 * allocate one.
1776 */
1777
1778 a_idx = MMU_TIA(va); /* Calculate the TIA of the VA. */
1779 a_dte = &a_tbl->at_dtbl[a_idx]; /* Retrieve descriptor from table */
1780 if (MMU_VALID_DT(*a_dte)) { /* Is the descriptor valid? */
1781 /* The descriptor is valid. Use the B table it points to. */
1782 /*************************************
1783 * a_idx *
1784 * v *
1785 * a_tbl -> +-+-+-+-+-+-+-+-+-+-+-+- *
1786 * | | | | | | | | | | | | *
1787 * +-+-+-+-+-+-+-+-+-+-+-+- *
1788 * | *
1789 * \- b_tbl -> +-+- *
1790 * | | *
1791 * +-+- *
1792 *************************************/
1793 b_dte = mmu_ptov(a_dte->addr.raw);
1794 b_tbl = mmuB2tmgr(b_dte);
1795
1796 /*
1797 * If the requested mapping must be wired, but this table
1798 * being used to map it is not, the table must be removed
1799 * from the available pool and its wired entry count
1800 * incremented.
1801 */
1802 if (wired && !b_tbl->bt_wcnt) {
1803 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
1804 a_tbl->at_wcnt++;
1805 }
1806 } else {
1807 /* The descriptor is invalid. Allocate a new B table. */
1808 b_tbl = get_b_table();
1809
1810 /* Point the parent A table descriptor to this new B table. */
1811 a_dte->addr.raw = mmu_vtop(b_tbl->bt_dtbl);
1812 a_dte->attr.raw = MMU_LONG_DTE_LU | MMU_DT_SHORT;
1813 a_tbl->at_ecnt++; /* Update parent's valid entry count */
1814
1815 /* Create the necessary back references to the parent table */
1816 b_tbl->bt_parent = a_tbl;
1817 b_tbl->bt_pidx = a_idx;
1818
1819 /*
1820 * If this table is to be wired, make sure the parent A table
1821 * wired count is updated to reflect that it has another wired
1822 * entry.
1823 */
1824 if (wired)
1825 a_tbl->at_wcnt++;
1826 else if (llevel == NONE)
1827 llevel = NEWB;
1828 }
1829
1830 /*
1831 * Step 3 - Walk into the C table, if there is no valid C table,
1832 * allocate one.
1833 */
1834
1835 b_idx = MMU_TIB(va); /* Calculate the TIB of the VA */
1836 b_dte = &b_tbl->bt_dtbl[b_idx]; /* Retrieve descriptor from table */
1837 if (MMU_VALID_DT(*b_dte)) { /* Is the descriptor valid? */
1838 /* The descriptor is valid. Use the C table it points to. */
1839 /**************************************
1840 * c_idx *
1841 * | v *
1842 * \- b_tbl -> +-+-+-+-+-+-+-+-+-+-+- *
1843 * | | | | | | | | | | | *
1844 * +-+-+-+-+-+-+-+-+-+-+- *
1845 * | *
1846 * \- c_tbl -> +-+-- *
1847 * | | | *
1848 * +-+-- *
1849 **************************************/
1850 c_pte = mmu_ptov(MMU_PTE_PA(*b_dte));
1851 c_tbl = mmuC2tmgr(c_pte);
1852
1853 /* If mapping is wired and table is not */
1854 if (wired && !c_tbl->ct_wcnt) {
1855 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1856 b_tbl->bt_wcnt++;
1857 }
1858 } else {
1859 /* The descriptor is invalid. Allocate a new C table. */
1860 c_tbl = get_c_table();
1861
1862 /* Point the parent B table descriptor to this new C table. */
1863 b_dte->attr.raw = mmu_vtop(c_tbl->ct_dtbl);
1864 b_dte->attr.raw |= MMU_DT_SHORT;
1865 b_tbl->bt_ecnt++; /* Update parent's valid entry count */
1866
1867 /* Create the necessary back references to the parent table */
1868 c_tbl->ct_parent = b_tbl;
1869 c_tbl->ct_pidx = b_idx;
1870 /*
1871 * Store the pmap and base virtual managed address for faster
1872 * retrieval in the PV functions.
1873 */
1874 c_tbl->ct_pmap = pmap;
1875 c_tbl->ct_va = (va & (MMU_TIA_MASK|MMU_TIB_MASK));
1876
1877 /*
1878 * If this table is to be wired, make sure the parent B table
1879 * wired count is updated to reflect that it has another wired
1880 * entry.
1881 */
1882 if (wired)
1883 b_tbl->bt_wcnt++;
1884 else if (llevel == NONE)
1885 llevel = NEWC;
1886 }
1887
1888 /*
1889 * Step 4 - Deposit a page descriptor (PTE) into the appropriate
1890 * slot of the C table, describing the PA to which the VA is mapped.
1891 */
1892
1893 pte_idx = MMU_TIC(va);
1894 c_pte = &c_tbl->ct_dtbl[pte_idx];
1895 if (MMU_VALID_DT(*c_pte)) { /* Is the entry currently valid? */
1896 /*
1897 * The PTE is currently valid. This particular call
1898 * is just a synonym for one (or more) of the following
1899 * operations:
1900 * change protection of a page
1901 * change wiring status of a page
1902 * remove the mapping of a page
1903 */
1904
1905 /* First check if this is a wiring operation. */
1906 if (c_pte->attr.raw & MMU_SHORT_PTE_WIRED) {
1907 /*
1908 * The existing mapping is wired, so adjust wired
1909 * entry count here. If new mapping is still wired,
1910 * wired entry count will be incremented again later.
1911 */
1912 c_tbl->ct_wcnt--;
1913 if (!wired) {
1914 /*
1915 * The mapping of this PTE is being changed
1916 * from wired to unwired.
1917 * Adjust wired entry counts in each table and
1918 * set llevel flag to put unwired tables back
1919 * into the active pool.
1920 */
1921 if (c_tbl->ct_wcnt == 0) {
1922 llevel = NEWC;
1923 if (--b_tbl->bt_wcnt == 0) {
1924 llevel = NEWB;
1925 if (--a_tbl->at_wcnt == 0) {
1926 llevel = NEWA;
1927 }
1928 }
1929 }
1930 }
1931 }
1932
1933 /* Is the new address the same as the old? */
1934 if (MMU_PTE_PA(*c_pte) == pa) {
1935 /*
1936 * Yes, mark that it does not need to be reinserted
1937 * into the PV list.
1938 */
1939 insert = false;
1940
1941 /*
1942 * Clear all but the modified, referenced and wired
1943 * bits on the PTE.
1944 */
1945 c_pte->attr.raw &= (MMU_SHORT_PTE_M
1946 | MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED);
1947 } else {
1948 /* No, remove the old entry */
1949 pmap_remove_pte(c_pte);
1950 insert = true;
1951 }
1952
1953 /*
1954 * TLB flush is only necessary if modifying current map.
1955 * However, in pmap_enter(), the pmap almost always IS
1956 * the current pmap, so don't even bother to check.
1957 */
1958 TBIS(va);
1959 } else {
1960 /*
1961 * The PTE is invalid. Increment the valid entry count in
1962 * the C table manager to reflect the addition of a new entry.
1963 */
1964 c_tbl->ct_ecnt++;
1965
1966 /* XXX - temporarily make sure the PTE is cleared. */
1967 c_pte->attr.raw = 0;
1968
1969 /* It will also need to be inserted into the PV list. */
1970 insert = true;
1971 }
1972
1973 /*
1974 * If page is changing from unwired to wired status, set an unused bit
1975 * within the PTE to indicate that it is wired. Also increment the
1976 * wired entry count in the C table manager.
1977 */
1978 if (wired) {
1979 c_pte->attr.raw |= MMU_SHORT_PTE_WIRED;
1980 c_tbl->ct_wcnt++;
1981 }
1982
1983 /*
1984 * Map the page, being careful to preserve modify/reference/wired
1985 * bits. At this point it is assumed that the PTE either has no bits
1986 * set, or if there are set bits, they are only modified, reference or
1987 * wired bits. If not, the following statement will cause erratic
1988 * behavior.
1989 */
1990 #ifdef PMAP_DEBUG
1991 if (c_pte->attr.raw & ~(MMU_SHORT_PTE_M |
1992 MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED)) {
1993 printf("pmap_enter: junk left in PTE at %p\n", c_pte);
1994 Debugger();
1995 }
1996 #endif
1997 c_pte->attr.raw |= ((u_long) pa | MMU_DT_PAGE);
1998
1999 /*
2000 * If the mapping should be read-only, set the write protect
2001 * bit in the PTE.
2002 */
2003 if (!(prot & VM_PROT_WRITE))
2004 c_pte->attr.raw |= MMU_SHORT_PTE_WP;
2005
2006 /*
2007 * Mark the PTE as used and/or modified as specified by the flags arg.
2008 */
2009 if (flags & VM_PROT_ALL) {
2010 c_pte->attr.raw |= MMU_SHORT_PTE_USED;
2011 if (flags & VM_PROT_WRITE) {
2012 c_pte->attr.raw |= MMU_SHORT_PTE_M;
2013 }
2014 }
2015
2016 /*
2017 * If the mapping should be cache inhibited (indicated by the flag
2018 * bits found in the low-order bits of the physical address),
2019 * mark the PTE as a cache-inhibited page.
2020 */
2021 if (mapflags & PMAP_NC)
2022 c_pte->attr.raw |= MMU_SHORT_PTE_CI;
2023
2024 /*
2025 * If the physical address being mapped is managed by the PV
2026 * system then link the pte into the list of pages mapped to that
2027 * address.
2028 */
2029 if (insert && managed) {
2030 pv = pa2pv(pa);
2031 nidx = pteidx(c_pte);
2032
2033 pvebase[nidx].pve_next = pv->pv_idx;
2034 pv->pv_idx = nidx;
2035 }
2036
2037 /* Move any allocated or unwired tables back into the active pool. */
2038
2039 switch (llevel) {
2040 case NEWA:
2041 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2042 /* FALLTHROUGH */
2043 case NEWB:
2044 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2045 /* FALLTHROUGH */
2046 case NEWC:
2047 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2048 /* FALLTHROUGH */
2049 default:
2050 break;
2051 }
2052
2053 return 0;
2054 }
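
/*
 * Example: how NOTE1 above looks from the caller's side.  Sun3x-specific
 * flags such as PMAP_NC ride in the low, unused bits of the physical
 * address, while MI flags such as PMAP_WIRED arrive in 'flags'.  Sketch
 * only; example_map_device_page() is a hypothetical helper, not part of
 * this file:
 */
#if 0	/* illustrative sketch only */
static void
example_map_device_page(pmap_t pmap, vaddr_t va, paddr_t pa)
{
	/* Wired, cache-inhibited mapping; PMAP_NC is carried in 'pa'. */
	pmap_enter(pmap, va, pa | PMAP_NC,
	    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
	pmap_update(pmap);
}
#endif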
2055
2056 /* pmap_enter_kernel INTERNAL
2057 **
2058 * Map the given virtual address to the given physical address within the
2059 * kernel address space. This function exists because the kernel map does
2060 * not do dynamic table allocation. It consists of a contiguous array of ptes
2061 * and can be edited directly without the need to walk through any tables.
2062 *
2063 * XXX: "Danger, Will Robinson!"
2064 * Note that the kernel should never take a fault on any page
2065 * between [ KERNBASE .. virtual_avail ] and this is checked in
2066 * trap.c for kernel-mode MMU faults. This means that mappings
2067 * created in that range must be implicitly wired. -gwr
2068 */
2069 void
2070 pmap_enter_kernel(vaddr_t va, paddr_t pa, vm_prot_t prot)
2071 {
2072 bool was_valid, insert;
2073 u_short pte_idx;
2074 int flags;
2075 mmu_short_pte_t *pte;
2076 pv_t *pv;
2077 paddr_t old_pa;
2078
2079 flags = (pa & ~MMU_PAGE_MASK);
2080 pa &= MMU_PAGE_MASK;
2081
2082 if (is_managed(pa))
2083 insert = true;
2084 else
2085 insert = false;
2086
2087 /*
2088 * Calculate the index of the PTE being modified.
2089 */
2090 pte_idx = (u_long)m68k_btop(va - KERNBASE3X);
2091
2092 /* This array is traditionally named "Sysmap" */
2093 pte = &kernCbase[pte_idx];
2094
2095 if (MMU_VALID_DT(*pte)) {
2096 was_valid = true;
2097 /*
2098 * If the PTE already maps a different
2099 * physical address, unmap and pv_unlink.
2100 */
2101 old_pa = MMU_PTE_PA(*pte);
2102 if (pa != old_pa)
2103 pmap_remove_pte(pte);
2104 else {
2105 /*
2106 * Old PA and new PA are the same. No need to
2107 * relink the mapping within the PV list.
2108 */
2109 insert = false;
2110
2111 /*
2112 * Save any mod/ref bits on the PTE.
2113 */
2114 pte->attr.raw &= (MMU_SHORT_PTE_USED|MMU_SHORT_PTE_M);
2115 }
2116 } else {
2117 pte->attr.raw = MMU_DT_INVALID;
2118 was_valid = false;
2119 }
2120
2121 /*
2122 * Map the page, being careful to preserve the modified/referenced
2123 * bits on the PTE.
2124 */
2125 pte->attr.raw |= (pa | MMU_DT_PAGE);
2126
2127 if (!(prot & VM_PROT_WRITE)) /* If access should be read-only */
2128 pte->attr.raw |= MMU_SHORT_PTE_WP;
2129 if (flags & PMAP_NC)
2130 pte->attr.raw |= MMU_SHORT_PTE_CI;
2131 if (was_valid)
2132 TBIS(va);
2133
2134 /*
2135 * Insert the PTE into the PV system, if need be.
2136 */
2137 if (insert) {
2138 pv = pa2pv(pa);
2139 pvebase[pte_idx].pve_next = pv->pv_idx;
2140 pv->pv_idx = pte_idx;
2141 }
2142 }
2143
2144 void
2145 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
2146 {
2147 mmu_short_pte_t *pte;
2148 u_int mapflags;
2149
2150 /* XXX: MD PMAP_NC should be replaced by MI PMAP_NOCACHE in flags. */
2151 mapflags = (pa & ~MMU_PAGE_MASK);
2152 if ((mapflags & PMAP_NC) != 0)
2153 flags |= PMAP_NOCACHE;
2154
2155 /* This array is traditionally named "Sysmap" */
2156 pte = &kernCbase[(u_long)m68k_btop(va - KERNBASE3X)];
2157
2158 KASSERT(!MMU_VALID_DT(*pte));
2159 pte->attr.raw = MMU_DT_INVALID | MMU_DT_PAGE | (pa & MMU_PAGE_MASK);
2160 if (!(prot & VM_PROT_WRITE))
2161 pte->attr.raw |= MMU_SHORT_PTE_WP;
2162 if ((flags & PMAP_NOCACHE) != 0)
2163 pte->attr.raw |= MMU_SHORT_PTE_CI;
2164 }
2165
2166 void
2167 pmap_kremove(vaddr_t va, vsize_t len)
2168 {
2169 int idx, eidx;
2170
2171 #ifdef PMAP_DEBUG
2172 if ((va & PGOFSET) || (len & PGOFSET))
2173 panic("pmap_kremove: alignment");
2174 #endif
2175
2176 idx = m68k_btop(va - KERNBASE3X);
2177 eidx = m68k_btop(va + len - KERNBASE3X);
2178
2179 while (idx < eidx) {
2180 kernCbase[idx++].attr.raw = MMU_DT_INVALID;
2181 TBIS(va);
2182 va += PAGE_SIZE;
2183 }
2184 }
2185
2186 /* pmap_map INTERNAL
2187 **
2188 * Map a contiguous range of physical memory into a contiguous range of
2189 * the kernel virtual address space.
2190 *
2191 * Used for device mappings and early mapping of the kernel text/data/bss.
2192 * Returns the first virtual address beyond the end of the range.
2193 */
2194 vaddr_t
2195 pmap_map(vaddr_t va, paddr_t pa, paddr_t endpa, int prot)
2196 {
2197 int sz;
2198
2199 sz = endpa - pa;
2200 do {
2201 pmap_enter_kernel(va, pa, prot);
2202 va += PAGE_SIZE;
2203 pa += PAGE_SIZE;
2204 sz -= PAGE_SIZE;
2205 } while (sz > 0);
2206 pmap_update(pmap_kernel());
2207 return va;
2208 }
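
/*
 * Example: pmap_map() is intended for early, contiguous mappings where no
 * dynamic table allocation is wanted.  Sketch only; the variables are
 * hypothetical and the length is made up for illustration:
 */
#if 0	/* illustrative sketch only */
	vaddr_t va = example_kernel_va;		/* hypothetical */
	paddr_t pa = example_device_pa;		/* hypothetical */

	/* Map four pages read/write; the next free VA is returned. */
	va = pmap_map(va, pa, pa + 4 * PAGE_SIZE,
	    VM_PROT_READ | VM_PROT_WRITE);
#endif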
2209
2210 /* pmap_protect_kernel INTERNAL
2211 **
2212 * Apply the given protection code to a kernel address range.
2213 */
2214 static INLINE void
2215 pmap_protect_kernel(vaddr_t startva, vaddr_t endva, vm_prot_t prot)
2216 {
2217 vaddr_t va;
2218 mmu_short_pte_t *pte;
2219
2220 pte = &kernCbase[(unsigned long) m68k_btop(startva - KERNBASE3X)];
2221 for (va = startva; va < endva; va += PAGE_SIZE, pte++) {
2222 if (MMU_VALID_DT(*pte)) {
2223 switch (prot) {
2224 case VM_PROT_ALL:
2225 break;
2226 case VM_PROT_EXECUTE:
2227 case VM_PROT_READ:
2228 case VM_PROT_READ|VM_PROT_EXECUTE:
2229 pte->attr.raw |= MMU_SHORT_PTE_WP;
2230 break;
2231 case VM_PROT_NONE:
2232 /* this is an alias for 'pmap_remove_kernel' */
2233 pmap_remove_pte(pte);
2234 break;
2235 default:
2236 break;
2237 }
2238 /*
2239 * since this is the kernel, immediately flush any cached
2240 * descriptors for this address.
2241 */
2242 TBIS(va);
2243 }
2244 }
2245 }
2246
2247 /* pmap_protect INTERFACE
2248 **
2249 * Apply the given protection to the given virtual address range within
2250 * the given map.
2251 *
2252 * It is ok for the protection applied to be stronger than what is
2253 * specified. We use this to our advantage when the given map has no
2254 * mapping for the virtual address. By skipping a page when this
2255 * is discovered, we are effectively applying a protection of VM_PROT_NONE,
2256 * and therefore do not need to map the page just to apply a protection
2257 * code. Only pmap_enter() needs to create new mappings if they do not exist.
2258 *
2259 * XXX - This function could be sped up by using pmap_stroll() for initial
2260 * setup, and then manual scrolling in the for() loop.
2261 */
2262 void
2263 pmap_protect(pmap_t pmap, vaddr_t startva, vaddr_t endva, vm_prot_t prot)
2264 {
2265 bool iscurpmap;
2266 int a_idx, b_idx, c_idx;
2267 a_tmgr_t *a_tbl;
2268 b_tmgr_t *b_tbl;
2269 c_tmgr_t *c_tbl;
2270 mmu_short_pte_t *pte;
2271
2272 if (pmap == pmap_kernel()) {
2273 pmap_protect_kernel(startva, endva, prot);
2274 return;
2275 }
2276
2277 /*
2278 * In this particular pmap implementation, there are only three
2279 * types of memory protection: 'all' (read/write/execute),
2280 * 'read-only' (read/execute) and 'none' (no mapping.)
2281 * It is not possible for us to treat 'executable' as a separate
2282 * protection type. Therefore, protection requests that seek to
2283 * remove execute permission while retaining read or write, and those
2284 * that make little sense (write-only for example) are ignored.
2285 */
2286 switch (prot) {
2287 case VM_PROT_NONE:
2288 /*
2289 * A request to apply the protection code of
2290 * 'VM_PROT_NONE' is a synonym for pmap_remove().
2291 */
2292 pmap_remove(pmap, startva, endva);
2293 return;
2294 case VM_PROT_EXECUTE:
2295 case VM_PROT_READ:
2296 case VM_PROT_READ|VM_PROT_EXECUTE:
2297 /* continue */
2298 break;
2299 case VM_PROT_WRITE:
2300 case VM_PROT_WRITE|VM_PROT_READ:
2301 case VM_PROT_WRITE|VM_PROT_EXECUTE:
2302 case VM_PROT_ALL:
2303 /* None of these should happen in a sane system. */
2304 return;
2305 }
2306
2307 /*
2308 * If the pmap has no A table, it has no mappings and therefore
2309 * there is nothing to protect.
2310 */
2311 if ((a_tbl = pmap->pm_a_tmgr) == NULL)
2312 return;
2313
2314 a_idx = MMU_TIA(startva);
2315 b_idx = MMU_TIB(startva);
2316 c_idx = MMU_TIC(startva);
2317 b_tbl = NULL;
2318 c_tbl = NULL;
2319
2320 iscurpmap = (pmap == current_pmap());
2321 while (startva < endva) {
2322 if (b_tbl || MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
2323 if (b_tbl == NULL) {
2324 b_tbl = (b_tmgr_t *) a_tbl->at_dtbl[a_idx].addr.raw;
2325 b_tbl = mmu_ptov((vaddr_t)b_tbl);
2326 b_tbl = mmuB2tmgr((mmu_short_dte_t *)b_tbl);
2327 }
2328 if (c_tbl || MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
2329 if (c_tbl == NULL) {
2330 c_tbl = (c_tmgr_t *) MMU_DTE_PA(b_tbl->bt_dtbl[b_idx]);
2331 c_tbl = mmu_ptov((vaddr_t)c_tbl);
2332 c_tbl = mmuC2tmgr((mmu_short_pte_t *)c_tbl);
2333 }
2334 if (MMU_VALID_DT(c_tbl->ct_dtbl[c_idx])) {
2335 pte = &c_tbl->ct_dtbl[c_idx];
2336 /* make the mapping read-only */
2337 pte->attr.raw |= MMU_SHORT_PTE_WP;
2338 /*
2339 * If we just modified the current address space,
2340 * flush any translations for the modified page from
2341 * the translation cache and any data from it in the
2342 * data cache.
2343 */
2344 if (iscurpmap)
2345 TBIS(startva);
2346 }
2347 startva += PAGE_SIZE;
2348
2349 if (++c_idx >= MMU_C_TBL_SIZE) { /* exceeded C table? */
2350 c_tbl = NULL;
2351 c_idx = 0;
2352 if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2353 b_tbl = NULL;
2354 b_idx = 0;
2355 }
2356 }
2357 } else { /* C table wasn't valid */
2358 c_tbl = NULL;
2359 c_idx = 0;
2360 startva += MMU_TIB_RANGE;
2361 if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2362 b_tbl = NULL;
2363 b_idx = 0;
2364 }
2365 } /* C table */
2366 } else { /* B table wasn't valid */
2367 b_tbl = NULL;
2368 b_idx = 0;
2369 startva += MMU_TIA_RANGE;
2370 a_idx++;
2371 } /* B table */
2372 }
2373 }
2374
2375 /* pmap_unwire INTERFACE
2376 **
2377 * Clear the wired attribute of the specified page.
2378 *
2379 * This function is called from vm_fault.c to unwire
2380 * a mapping.
2381 */
2382 void
2383 pmap_unwire(pmap_t pmap, vaddr_t va)
2384 {
2385 int a_idx, b_idx, c_idx;
2386 a_tmgr_t *a_tbl;
2387 b_tmgr_t *b_tbl;
2388 c_tmgr_t *c_tbl;
2389 mmu_short_pte_t *pte;
2390
2391 /* Kernel mappings always remain wired. */
2392 if (pmap == pmap_kernel())
2393 return;
2394
2395 /*
2396 * Walk through the tables. If the walk terminates without
2397 * a valid PTE then the address wasn't wired in the first place.
2398 * Return immediately.
2399 */
2400 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte, &a_idx,
2401 &b_idx, &c_idx) == false)
2402 return;
2403
2404
2405 /* Is the PTE wired? If not, return. */
2406 if (!(pte->attr.raw & MMU_SHORT_PTE_WIRED))
2407 return;
2408
2409 /* Remove the wiring bit. */
2410 pte->attr.raw &= ~(MMU_SHORT_PTE_WIRED);
2411
2412 /*
2413 * Decrement the wired entry count in the C table.
2414 * If it reaches zero the following things happen:
2415 * 1. The table no longer has any wired entries and is considered
2416 * unwired.
2417 * 2. It is placed on the available queue.
2418 * 3. The parent table's wired entry count is decremented.
2419 * 4. If it reaches zero, this process repeats at step 1 and
2420 * stops after reaching the A table.
2421 */
2422 if (--c_tbl->ct_wcnt == 0) {
2423 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2424 if (--b_tbl->bt_wcnt == 0) {
2425 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2426 if (--a_tbl->at_wcnt == 0) {
2427 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2428 }
2429 }
2430 }
2431 }
2432
2433 /* pmap_copy INTERFACE
2434 **
2435 * Copy the mappings of a range of addresses in one pmap, into
2436 * the destination address of another.
2437 *
2438 * This routine is advisory. Should we one day decide that MMU tables
2439 * may be shared by more than one pmap, this function should be used to
2440 * link them together. Until that day however, we do nothing.
2441 */
2442 void
2443 pmap_copy(pmap_t pmap_a, pmap_t pmap_b, vaddr_t dst, vsize_t len, vaddr_t src)
2444 {
2445
2446 /* not implemented. */
2447 }
2448
2449 /* pmap_copy_page INTERFACE
2450 **
2451 * Copy the contents of one physical page into another.
2452 *
2453 * This function makes use of two virtual pages allocated in pmap_bootstrap()
2454 * to map the two specified physical pages into the kernel address space.
2455 *
2456 * Note: We could use the transparent translation registers to make the
2457 * mappings. If we do so, be sure to disable interrupts before using them.
2458 */
2459 void
2460 pmap_copy_page(paddr_t srcpa, paddr_t dstpa)
2461 {
2462 vaddr_t srcva, dstva;
2463 int s;
2464
2465 srcva = tmp_vpages[0];
2466 dstva = tmp_vpages[1];
2467
2468 s = splvm();
2469 #ifdef DIAGNOSTIC
2470 if (tmp_vpages_inuse++)
2471 panic("pmap_copy_page: temporary vpages are in use.");
2472 #endif
2473
2474 /* Map pages as non-cacheable to avoid cache pollution? */
2475 pmap_kenter_pa(srcva, srcpa, VM_PROT_READ, 0);
2476 pmap_kenter_pa(dstva, dstpa, VM_PROT_READ | VM_PROT_WRITE, 0);
2477
2478 /* Hand-optimized version of memcpy(dst, src, PAGE_SIZE) */
2479 copypage((char *)srcva, (char *)dstva);
2480
2481 pmap_kremove(srcva, PAGE_SIZE);
2482 pmap_kremove(dstva, PAGE_SIZE);
2483
2484 #ifdef DIAGNOSTIC
2485 --tmp_vpages_inuse;
2486 #endif
2487 splx(s);
2488 }
2489
2490 /* pmap_zero_page INTERFACE
2491 **
2492 * Zero the contents of the specified physical page.
2493 *
2494 * Uses one of the virtual pages allocated in pmap_bootstrap()
2495 * to map the specified page into the kernel address space.
2496 */
2497 void
2498 pmap_zero_page(paddr_t dstpa)
2499 {
2500 vaddr_t dstva;
2501 int s;
2502
2503 dstva = tmp_vpages[1];
2504 s = splvm();
2505 #ifdef DIAGNOSTIC
2506 if (tmp_vpages_inuse++)
2507 panic("pmap_zero_page: temporary vpages are in use.");
2508 #endif
2509
2510 /* The comments in pmap_copy_page() above apply here also. */
2511 pmap_kenter_pa(dstva, dstpa, VM_PROT_READ | VM_PROT_WRITE, 0);
2512
2513 /* Hand-optimized version of memset(ptr, 0, PAGE_SIZE) */
2514 zeropage((char *)dstva);
2515
2516 pmap_kremove(dstva, PAGE_SIZE);
2517 #ifdef DIAGNOSTIC
2518 --tmp_vpages_inuse;
2519 #endif
2520 splx(s);
2521 }
2522
2523 /* pmap_pinit INTERNAL
2524 **
2525 * Initialize a pmap structure.
2526 */
2527 static INLINE void
2528 pmap_pinit(pmap_t pmap)
2529 {
2530
2531 memset(pmap, 0, sizeof(struct pmap));
2532 pmap->pm_a_tmgr = NULL;
2533 pmap->pm_a_phys = kernAphys;
2534 pmap->pm_refcount = 1;
2535 }
2536
2537 /* pmap_create INTERFACE
2538 **
2539 * Create and return a pmap structure.
2540 */
2541 pmap_t
2542 pmap_create(void)
2543 {
2544 pmap_t pmap;
2545
2546 pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
2547 pmap_pinit(pmap);
2548 return pmap;
2549 }
2550
2551 /* pmap_release INTERNAL
2552 **
2553 * Release any resources held by the given pmap.
2554 *
2555 * This is the reverse analog to pmap_pinit. It does not
2556 * necessarily mean that the pmap structure is to be deallocated,
2557 * as it is in pmap_destroy.
2558 */
2559 static INLINE void
2560 pmap_release(pmap_t pmap)
2561 {
2562
2563 /*
2564 * As long as the pmap contains no mappings,
2565 * which always should be the case whenever
2566 * this function is called, there really should
2567 * be nothing to do.
2568 */
2569 #ifdef PMAP_DEBUG
2570 if (pmap == pmap_kernel())
2571 panic("pmap_release: kernel pmap");
2572 #endif
2573 /*
2574 * XXX - If this pmap has an A table, give it back.
2575 * The pmap SHOULD be empty by now, and pmap_remove
2576 * should have already given back the A table...
2577 * However, I see: pmap->pm_a_tmgr->at_ecnt == 1
2578 * at this point, which means some mapping was not
2579 * removed when it should have been. -gwr
2580 */
2581 if (pmap->pm_a_tmgr != NULL) {
2582 /* First make sure we are not using it! */
2583 if (kernel_crp.rp_addr == pmap->pm_a_phys) {
2584 kernel_crp.rp_addr = kernAphys;
2585 loadcrp(&kernel_crp);
2586 }
2587 #ifdef PMAP_DEBUG /* XXX - todo! */
2588 /* XXX - Now complain... */
2589 printf("pmap_release: still have table\n");
2590 Debugger();
2591 #endif
2592 free_a_table(pmap->pm_a_tmgr, true);
2593 pmap->pm_a_tmgr = NULL;
2594 pmap->pm_a_phys = kernAphys;
2595 }
2596 }
2597
2598 /* pmap_reference INTERFACE
2599 **
2600 * Increment the reference count of a pmap.
2601 */
2602 void
2603 pmap_reference(pmap_t pmap)
2604 {
2605
2606 atomic_inc_uint(&pmap->pm_refcount);
2607 }
2608
2609 /* pmap_dereference INTERNAL
2610 **
2611 * Decrease the reference count on the given pmap
2612 * by one and return the current count.
2613 */
2614 static INLINE int
2615 pmap_dereference(pmap_t pmap)
2616 {
2617 int rtn;
2618
2619 rtn = atomic_dec_uint_nv(&pmap->pm_refcount);
2620
2621 return rtn;
2622 }
2623
2624 /* pmap_destroy INTERFACE
2625 **
2626 * Decrement a pmap's reference count and delete
2627 * the pmap if it becomes zero. Will be called
2628 * only after all mappings have been removed.
2629 */
2630 void
2631 pmap_destroy(pmap_t pmap)
2632 {
2633
2634 if (pmap_dereference(pmap) == 0) {
2635 pmap_release(pmap);
2636 pool_put(&pmap_pmap_pool, pmap);
2637 }
2638 }
2639
2640 /* pmap_is_referenced INTERFACE
2641 **
2642 * Determine if the given physical page has been
2643 * referenced (read from [or written to.])
2644 */
2645 bool
2646 pmap_is_referenced(struct vm_page *pg)
2647 {
2648 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2649 pv_t *pv;
2650 int idx;
2651
2652 /*
2653 * Check the flags on the pv head. If they are set,
2654 * return immediately. Otherwise a search must be done.
2655 */
2656
2657 pv = pa2pv(pa);
2658 if (pv->pv_flags & PV_FLAGS_USED)
2659 return true;
2660
2661 /*
2662 * Search through all pv elements pointing
2663 * to this page and query their reference bits
2664 */
2665
2666 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2667 if (MMU_PTE_USED(kernCbase[idx])) {
2668 return true;
2669 }
2670 }
2671 return false;
2672 }
2673
2674 /* pmap_is_modified INTERFACE
2675 **
2676 * Determine if the given physical page has been
2677 * modified (written to.)
2678 */
2679 bool
2680 pmap_is_modified(struct vm_page *pg)
2681 {
2682 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2683 pv_t *pv;
2684 int idx;
2685
2686 /* see comments in pmap_is_referenced() */
2687 pv = pa2pv(pa);
2688 if (pv->pv_flags & PV_FLAGS_MDFY)
2689 return true;
2690
2691 for (idx = pv->pv_idx;
2692 idx != PVE_EOL;
2693 idx = pvebase[idx].pve_next) {
2694
2695 if (MMU_PTE_MODIFIED(kernCbase[idx])) {
2696 return true;
2697 }
2698 }
2699
2700 return false;
2701 }
2702
2703 /* pmap_page_protect INTERFACE
2704 **
2705 * Applies the given protection to all mappings to the given
2706 * physical page.
2707 */
2708 void
2709 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
2710 {
2711 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2712 pv_t *pv;
2713 int idx;
2714 vaddr_t va;
2715 struct mmu_short_pte_struct *pte;
2716 c_tmgr_t *c_tbl;
2717 pmap_t pmap, curpmap;
2718
2719 curpmap = current_pmap();
2720 pv = pa2pv(pa);
2721
2722 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2723 pte = &kernCbase[idx];
2724 switch (prot) {
2725 case VM_PROT_ALL:
2726 /* do nothing */
2727 break;
2728 case VM_PROT_EXECUTE:
2729 case VM_PROT_READ:
2730 case VM_PROT_READ|VM_PROT_EXECUTE:
2731 /*
2732 * Determine the virtual address mapped by
2733 * the PTE and flush ATC entries if necessary.
2734 */
2735 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2736 pte->attr.raw |= MMU_SHORT_PTE_WP;
2737 if (pmap == curpmap || pmap == pmap_kernel())
2738 TBIS(va);
2739 break;
2740 case VM_PROT_NONE:
2741 /* Save the mod/ref bits. */
2742 pv->pv_flags |= pte->attr.raw;
2743 /* Invalidate the PTE. */
2744 pte->attr.raw = MMU_DT_INVALID;
2745
2746 /*
2747 * Update table counts. And flush ATC entries
2748 * if necessary.
2749 */
2750 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2751
2752 /*
2753 * If the PTE belongs to the kernel map,
2754 * be sure to flush the page it maps.
2755 */
2756 if (pmap == pmap_kernel()) {
2757 TBIS(va);
2758 } else {
2759 /*
2760 * The PTE belongs to a user map.
2761 * Update the entry count in the C
2762 * table to which it belongs and flush
2763 * the ATC if the mapping belongs to
2764 * the current pmap.
2765 */
2766 c_tbl->ct_ecnt--;
2767 if (pmap == curpmap)
2768 TBIS(va);
2769 }
2770 break;
2771 default:
2772 break;
2773 }
2774 }
2775
2776 /*
2777 * If the protection code indicates that all mappings to the page
2778 * be removed, truncate the PV list to zero entries.
2779 */
2780 if (prot == VM_PROT_NONE)
2781 pv->pv_idx = PVE_EOL;
2782 }
2783
2784 /* pmap_get_pteinfo INTERNAL
2785 **
2786 * Called internally to find the pmap and virtual address within that
2787 * map to which the pte at the given index maps. Also includes the PTE's C
2788 * table manager.
2789 *
2790 * Returns the pmap and the C table manager in the arguments provided,
2791 * and the virtual address by return value.
2792 */
2793 vaddr_t
2794 pmap_get_pteinfo(u_int idx, pmap_t *pmap, c_tmgr_t **tbl)
2795 {
2796 vaddr_t va = 0;
2797
2798 /*
2799 * Determine if the PTE is a kernel PTE or a user PTE.
2800 */
2801 if (idx >= NUM_KERN_PTES) {
2802 /*
2803 * The PTE belongs to a user mapping.
2804 */
2805 /* XXX: Would like an inline for this to validate idx... */
2806 *tbl = &Ctmgrbase[(idx - NUM_KERN_PTES) / MMU_C_TBL_SIZE];
2807
2808 *pmap = (*tbl)->ct_pmap;
2809 /*
2810 * To find the va to which the PTE maps, we first take
2811 * the table's base virtual address mapping which is stored
2812 * in ct_va. We then increment this address by a page for
2813 * every slot skipped until we reach the PTE.
2814 */
2815 va = (*tbl)->ct_va;
2816 va += m68k_ptob(idx % MMU_C_TBL_SIZE);
2817 } else {
2818 /*
2819 * The PTE belongs to the kernel map.
2820 */
2821 *pmap = pmap_kernel();
2822
2823 va = m68k_ptob(idx);
2824 va += KERNBASE3X;
2825 }
2826
2827 return va;
2828 }
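
/*
 * Example of the index arithmetic above: assuming, purely for illustration,
 * that MMU_C_TBL_SIZE is 64 and that NUM_KERN_PTES is itself a multiple of
 * MMU_C_TBL_SIZE, a user PTE 130 entries past NUM_KERN_PTES selects
 * Ctmgrbase[130 / 64] = Ctmgrbase[2], occupies slot 130 % 64 = 2 within
 * that table, and therefore maps va = Ctmgrbase[2].ct_va + m68k_ptob(2).
 */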
2829
2830 /* pmap_clear_modify INTERFACE
2831 **
2832 * Clear the modification bit on the page at the specified
2833 * physical address.
2834 *
2835 */
2836 bool
2837 pmap_clear_modify(struct vm_page *pg)
2838 {
2839 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2840 bool rv;
2841
2842 rv = pmap_is_modified(pg);
2843 pmap_clear_pv(pa, PV_FLAGS_MDFY);
2844 return rv;
2845 }
2846
2847 /* pmap_clear_reference INTERFACE
2848 **
2849 * Clear the referenced bit on the page at the specified
2850 * physical address.
2851 */
2852 bool
2853 pmap_clear_reference(struct vm_page *pg)
2854 {
2855 paddr_t pa = VM_PAGE_TO_PHYS(pg);
2856 bool rv;
2857
2858 rv = pmap_is_referenced(pg);
2859 pmap_clear_pv(pa, PV_FLAGS_USED);
2860 return rv;
2861 }
2862
2863 /* pmap_clear_pv INTERNAL
2864 **
2865 * Clears the specified flag from the specified physical address.
2866 * (Used by pmap_clear_modify() and pmap_clear_reference().)
2867 *
2868 * Flag is one of:
2869 * PV_FLAGS_MDFY - Page modified bit.
2870 * PV_FLAGS_USED - Page used (referenced) bit.
2871 *
2872 * This routine must not only clear the flag on the pv list
2873 * head. It must also clear the bit on every pte in the pv
2874 * list associated with the address.
2875 */
2876 void
2877 pmap_clear_pv(paddr_t pa, int flag)
2878 {
2879 pv_t *pv;
2880 int idx;
2881 vaddr_t va;
2882 pmap_t pmap;
2883 mmu_short_pte_t *pte;
2884 c_tmgr_t *c_tbl;
2885
2886 pv = pa2pv(pa);
2887 pv->pv_flags &= ~(flag);
2888 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2889 pte = &kernCbase[idx];
2890 pte->attr.raw &= ~(flag);
2891
2892 /*
2893 * The MC68030 MMU will not set the modified or
2894 * referenced bits on any MMU tables for which it has
2895 * a cached descriptor with its modify bit set. To ensure
2896 * that it will modify these bits on the PTE during the next
2897 * time it is written to or read from, we must flush it from
2898 * the ATC.
2899 *
2900 * Ordinarily it is only necessary to flush the descriptor
2901 * if it is used in the current address space. But since I
2902 * am not sure that there will always be a notion of
2903 * 'the current address space' when this function is called,
2904 * I will skip the test and always flush the address. It
2905 * does no harm.
2906 */
2907
2908 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2909 TBIS(va);
2910 }
2911 }
2912
2913 /* pmap_extract_kernel INTERNAL
2914 **
2915 * Extract a translation from the kernel address space.
2916 */
2917 static INLINE bool
2918 pmap_extract_kernel(vaddr_t va, paddr_t *pap)
2919 {
2920 mmu_short_pte_t *pte;
2921
2922 pte = &kernCbase[(u_int)m68k_btop(va - KERNBASE3X)];
2923 if (!MMU_VALID_DT(*pte))
2924 return false;
2925 if (pap != NULL)
2926 *pap = MMU_PTE_PA(*pte);
2927 return true;
2928 }
2929
2930 /* pmap_extract INTERFACE
2931 **
2932 * Return the physical address mapped by the virtual address
2933 * in the specified pmap.
2934 *
2935 * Note: this function should also apply an exclusive lock
2936 * on the pmap system during its duration.
2937 */
2938 bool
2939 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
2940 {
2941 int a_idx, b_idx, pte_idx;
2942 a_tmgr_t *a_tbl;
2943 b_tmgr_t *b_tbl;
2944 c_tmgr_t *c_tbl;
2945 mmu_short_pte_t *c_pte;
2946
2947 if (pmap == pmap_kernel())
2948 return pmap_extract_kernel(va, pap);
2949
2950 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl,
2951 &c_pte, &a_idx, &b_idx, &pte_idx) == false)
2952 return false;
2953
2954 if (!MMU_VALID_DT(*c_pte))
2955 return false;
2956
2957 if (pap != NULL)
2958 *pap = MMU_PTE_PA(*c_pte);
2959 return true;
2960 }
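
/*
 * Example: the usual pmap_extract() calling pattern, a VA-to-PA query that
 * tolerates unmapped addresses.  Sketch only (the printf text is made up):
 */
#if 0	/* illustrative sketch only */
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa))
		printf("va 0x%lx -> pa 0x%lx\n", (u_long)va, (u_long)pa);
	else
		printf("va 0x%lx is not mapped\n", (u_long)va);
#endif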
2961
2962 /* pmap_remove_kernel INTERNAL
2963 **
2964 * Remove the mapping of a range of virtual addresses from the kernel map.
2965 * The arguments are already page-aligned.
2966 */
2967 static INLINE void
2968 pmap_remove_kernel(vaddr_t sva, vaddr_t eva)
2969 {
2970 int idx, eidx;
2971
2972 #ifdef PMAP_DEBUG
2973 if ((sva & PGOFSET) || (eva & PGOFSET))
2974 panic("pmap_remove_kernel: alignment");
2975 #endif
2976
2977 idx = m68k_btop(sva - KERNBASE3X);
2978 eidx = m68k_btop(eva - KERNBASE3X);
2979
2980 while (idx < eidx) {
2981 pmap_remove_pte(&kernCbase[idx++]);
2982 TBIS(sva);
2983 sva += PAGE_SIZE;
2984 }
2985 }
2986
2987 /* pmap_remove INTERFACE
2988 **
2989 * Remove the mapping of a range of virtual addresses from the given pmap.
2990 *
2991 */
2992 void
2993 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
2994 {
2995
2996 if (pmap == pmap_kernel()) {
2997 pmap_remove_kernel(sva, eva);
2998 return;
2999 }
3000
3001 /*
3002 * If the pmap doesn't have an A table of its own, it has no mappings
3003 * that can be removed.
3004 */
3005 if (pmap->pm_a_tmgr == NULL)
3006 return;
3007
3008 /*
3009 * Remove the specified range from the pmap. If the function
3010 * returns true, the operation removed all the valid mappings
3011 * in the pmap and freed its A table. If this happened to the
3012 * currently loaded pmap, the MMU root pointer must be reloaded
3013 * with the default 'kernel' map.
3014 */
3015 if (pmap_remove_a(pmap->pm_a_tmgr, sva, eva)) {
3016 if (kernel_crp.rp_addr == pmap->pm_a_phys) {
3017 kernel_crp.rp_addr = kernAphys;
3018 loadcrp(&kernel_crp);
3019 /* will do TLB flush below */
3020 }
3021 pmap->pm_a_tmgr = NULL;
3022 pmap->pm_a_phys = kernAphys;
3023 }
3024
3025 /*
3026 * If we just modified the current address space,
3027 * make sure to flush the MMU cache.
3028 *
3029 * XXX - this could be an unnecessarily large flush.
3030 * XXX - Could decide, based on the size of the VA range
3031 * to be removed, whether to flush "by pages" or "all".
3032 */
3033 if (pmap == current_pmap())
3034 TBIAU();
3035 }
3036
3037 /* pmap_remove_a INTERNAL
3038 **
3039 * This is function number one in a set of three that removes a range
3040 * of memory in the most efficient manner by removing the highest possible
3041 * tables from the memory space. This particular function attempts to remove
3042 * as many B tables as it can, delegating the remaining fragmented ranges to
3043 * pmap_remove_b().
3044 *
3045 * If the removal operation results in an empty A table, the function returns
3046 * true.
3047 *
3048 * It's ugly but will do for now.
3049 */
3050 bool
3051 pmap_remove_a(a_tmgr_t *a_tbl, vaddr_t sva, vaddr_t eva)
3052 {
3053 bool empty;
3054 int idx;
3055 vaddr_t nstart, nend;
3056 b_tmgr_t *b_tbl;
3057 mmu_long_dte_t *a_dte;
3058 mmu_short_dte_t *b_dte;
3059 uint8_t at_wired, bt_wired;
3060
3061 /*
3062 * The following code works with what I call a 'granularity
3063 * reduction algorithm'. A range of addresses will always have
3064 * the following properties, which are classified according to
3065 * how the range relates to the size of the current granularity
3066 * - an A table entry:
3067 *
3068 * 1 2 3 4
3069 * -+---+---+---+---+---+---+---+-
3070 * -+---+---+---+---+---+---+---+-
3071 *
3072 * A range will always start on a granularity boundary, illustrated
3073 * by '+' signs in the table above, or it will start at some point
3074 * in-between a granularity boundary, as illustrated by point 1.
3075 * The first step in removing a range of addresses is to remove the
3076 * range between 1 and 2, the nearest granularity boundary. This
3077 * job is handled by the section of code governed by the
3078 * 'if (sva < nstart)' statement.
3079 *
3080 * A range will always encompass zero or more integral granules,
3081 * illustrated by points 2 and 3. Integral granules are easy to
3082 * remove. The removal of these granules is the second step, and
3083 * is handled by the code block 'if (nstart < nend)'.
3084 *
3085 * Lastly, a range will always end on a granularity boundary,
3086 * ill. by point 3, or it will fall just beyond one, ill. by point
3087 * 4. The last step involves removing this range and is handled by
3088 * the code block 'if (nend < eva)'.
3089 */
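	/*
	 * A concrete example (illustrative only), writing G for
	 * MMU_TIA_RANGE: removing [G + 0x2000, 4G + 0x1000) proceeds as
	 *   1. [G + 0x2000, 2G)  - partial head granule, via pmap_remove_b()
	 *   2. [2G, 4G)          - two whole granules, B tables freed outright
	 *   3. [4G, 4G + 0x1000) - partial tail granule, via pmap_remove_b()
	 * with nstart = 2G and nend = 4G computed below.
	 */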
3090 nstart = MMU_ROUND_UP_A(sva);
3091 nend = MMU_ROUND_A(eva);
3092
3093 at_wired = a_tbl->at_wcnt;
3094
3095 if (sva < nstart) {
3096 /*
3097 * This block is executed if the range starts in between
3098 * granularity boundaries.
3099 *
3100 * First find the DTE which is responsible for mapping
3101 * the start of the range.
3102 */
3103 idx = MMU_TIA(sva);
3104 a_dte = &a_tbl->at_dtbl[idx];
3105
3106 /*
3107 * If the DTE is valid then delegate the removal of the sub
3108 * range to pmap_remove_b(), which can remove addresses at
3109 * a finer granularity.
3110 */
3111 if (MMU_VALID_DT(*a_dte)) {
3112 b_dte = mmu_ptov(a_dte->addr.raw);
3113 b_tbl = mmuB2tmgr(b_dte);
3114 bt_wired = b_tbl->bt_wcnt;
3115
3116 /*
3117 * The sub range to be removed starts at the start
3118 * of the full range we were asked to remove, and ends
3119 * at the lesser of:
3120 * 1. The end of the full range, -or-
3121 * 2. The start of the full range, rounded up to the
3122 * nearest granularity boundary.
3123 */
3124 if (eva < nstart)
3125 empty = pmap_remove_b(b_tbl, sva, eva);
3126 else
3127 empty = pmap_remove_b(b_tbl, sva, nstart);
3128
3129 /*
3130 * If the child table no longer has wired entries,
3131 * decrement wired entry count.
3132 */
3133 if (bt_wired && b_tbl->bt_wcnt == 0)
3134 a_tbl->at_wcnt--;
3135
3136 /*
3137 * If the removal resulted in an empty B table,
3138 * invalidate the DTE that points to it and decrement
3139 * the valid entry count of the A table.
3140 */
3141 if (empty) {
3142 a_dte->attr.raw = MMU_DT_INVALID;
3143 a_tbl->at_ecnt--;
3144 }
3145 }
3146 /*
3147 * If the DTE is invalid, the address range is already non-
3148 * existent and can simply be skipped.
3149 */
3150 }
3151 if (nstart < nend) {
3152 /*
3153 * This block is executed if the range spans a whole number
3154 * multiple of granules (A table entries.)
3155 *
3156 * First find the DTE which is responsible for mapping
3157 * the start of the first granule involved.
3158 */
3159 idx = MMU_TIA(nstart);
3160 a_dte = &a_tbl->at_dtbl[idx];
3161
3162 /*
3163 * Remove entire sub-granules (B tables) one at a time,
3164 * until reaching the end of the range.
3165 */
3166 for (; nstart < nend; a_dte++, nstart += MMU_TIA_RANGE)
3167 if (MMU_VALID_DT(*a_dte)) {
3168 /*
3169 * Find the B table manager for the
3170 * entry and free it.
3171 */
3172 b_dte = mmu_ptov(a_dte->addr.raw);
3173 b_tbl = mmuB2tmgr(b_dte);
3174 bt_wired = b_tbl->bt_wcnt;
3175
3176 free_b_table(b_tbl, true);
3177
3178 /*
3179 * All child entries have been removed.
3180 * If there were any wired entries in it,
3181 * decrement wired entry count.
3182 */
3183 if (bt_wired)
3184 a_tbl->at_wcnt--;
3185
3186 /*
3187 * Invalidate the DTE that points to the
3188 * B table and decrement the valid entry
3189 * count of the A table.
3190 */
3191 a_dte->attr.raw = MMU_DT_INVALID;
3192 a_tbl->at_ecnt--;
3193 }
3194 }
3195 if (nend < eva) {
3196 /*
3197 * This block is executed if the range ends beyond a
3198 * granularity boundary.
3199 *
3200 * First find the DTE which is responsible for mapping
3201 * the start of the nearest (rounded down) granularity
3202 * boundary.
3203 */
3204 idx = MMU_TIA(nend);
3205 a_dte = &a_tbl->at_dtbl[idx];
3206
3207 /*
3208 * If the DTE is valid then delegate the removal of the sub
3209 * range to pmap_remove_b(), which can remove addresses at
3210 * a finer granularity.
3211 */
3212 if (MMU_VALID_DT(*a_dte)) {
3213 /*
3214 * Find the B table manager for the entry
3215 * and hand it to pmap_remove_b() along with
3216 * the sub range.
3217 */
3218 b_dte = mmu_ptov(a_dte->addr.raw);
3219 b_tbl = mmuB2tmgr(b_dte);
3220 bt_wired = b_tbl->bt_wcnt;
3221
3222 empty = pmap_remove_b(b_tbl, nend, eva);
3223
3224 /*
3225 * If the child table no longer has wired entries,
3226 * decrement wired entry count.
3227 */
3228 if (bt_wired && b_tbl->bt_wcnt == 0)
3229 a_tbl->at_wcnt--;
3230 /*
3231 * If the removal resulted in an empty B table,
3232 * invalidate the DTE that points to it and decrement
3233 * the valid entry count of the A table.
3234 */
3235 if (empty) {
3236 a_dte->attr.raw = MMU_DT_INVALID;
3237 a_tbl->at_ecnt--;
3238 }
3239 }
3240 }
3241
3242 /*
3243 * If there are no more entries in the A table, release it
3244 * back to the available pool and return true.
3245 */
3246 if (a_tbl->at_ecnt == 0) {
3247 KASSERT(a_tbl->at_wcnt == 0);
3248 a_tbl->at_parent = NULL;
3249 if (!at_wired)
3250 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
3251 TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
3252 empty = true;
3253 } else {
3254 /*
3255 * If the table doesn't have wired entries any longer
3256 * but still has unwired entries, put it back into
3257 * the available queue.
3258 */
3259 if (at_wired && a_tbl->at_wcnt == 0)
3260 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
3261 empty = false;
3262 }
3263
3264 return empty;
3265 }
3266
3267 /* pmap_remove_b INTERNAL
3268 **
3269 * Remove a range of addresses from an address space, trying to remove entire
3270 * C tables if possible.
3271 *
3272 * If the operation results in an empty B table, the function returns true.
3273 */
3274 bool
3275 pmap_remove_b(b_tmgr_t *b_tbl, vaddr_t sva, vaddr_t eva)
3276 {
3277 bool empty;
3278 int idx;
3279 vaddr_t nstart, nend, rstart;
3280 c_tmgr_t *c_tbl;
3281 mmu_short_dte_t *b_dte;
3282 mmu_short_pte_t *c_dte;
3283 uint8_t bt_wired, ct_wired;
3284
3285 nstart = MMU_ROUND_UP_B(sva);
3286 nend = MMU_ROUND_B(eva);
3287
3288 bt_wired = b_tbl->bt_wcnt;
3289
3290 if (sva < nstart) {
3291 idx = MMU_TIB(sva);
3292 b_dte = &b_tbl->bt_dtbl[idx];
3293 if (MMU_VALID_DT(*b_dte)) {
3294 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3295 c_tbl = mmuC2tmgr(c_dte);
3296 ct_wired = c_tbl->ct_wcnt;
3297
3298 if (eva < nstart)
3299 empty = pmap_remove_c(c_tbl, sva, eva);
3300 else
3301 empty = pmap_remove_c(c_tbl, sva, nstart);
3302
3303 /*
3304 * If the child table no longer has wired entries,
3305 * decrement wired entry count.
3306 */
3307 if (ct_wired && c_tbl->ct_wcnt == 0)
3308 b_tbl->bt_wcnt--;
3309
3310 if (empty) {
3311 b_dte->attr.raw = MMU_DT_INVALID;
3312 b_tbl->bt_ecnt--;
3313 }
3314 }
3315 }
3316 if (nstart < nend) {
3317 idx = MMU_TIB(nstart);
3318 b_dte = &b_tbl->bt_dtbl[idx];
3319 rstart = nstart;
3320 while (rstart < nend) {
3321 if (MMU_VALID_DT(*b_dte)) {
3322 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3323 c_tbl = mmuC2tmgr(c_dte);
3324 ct_wired = c_tbl->ct_wcnt;
3325
3326 free_c_table(c_tbl, true);
3327
3328 /*
3329 * All child entries have been removed.
3330 * If there were any wired entries in it,
3331 * decrement wired entry count.
3332 */
3333 if (ct_wired)
3334 b_tbl->bt_wcnt--;
3335
3336 b_dte->attr.raw = MMU_DT_INVALID;
3337 b_tbl->bt_ecnt--;
3338 }
3339 b_dte++;
3340 rstart += MMU_TIB_RANGE;
3341 }
3342 }
3343 if (nend < eva) {
3344 idx = MMU_TIB(nend);
3345 b_dte = &b_tbl->bt_dtbl[idx];
3346 if (MMU_VALID_DT(*b_dte)) {
3347 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3348 c_tbl = mmuC2tmgr(c_dte);
3349 ct_wired = c_tbl->ct_wcnt;
3350 empty = pmap_remove_c(c_tbl, nend, eva);
3351
3352 /*
3353 * If the child table no longer has wired entries,
3354 * decrement wired entry count.
3355 */
3356 if (ct_wired && c_tbl->ct_wcnt == 0)
3357 b_tbl->bt_wcnt--;
3358
3359 if (empty) {
3360 b_dte->attr.raw = MMU_DT_INVALID;
3361 b_tbl->bt_ecnt--;
3362 }
3363 }
3364 }
3365
3366 if (b_tbl->bt_ecnt == 0) {
3367 KASSERT(b_tbl->bt_wcnt == 0);
3368 b_tbl->bt_parent = NULL;
3369 if (!bt_wired)
3370 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
3371 TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
3372 empty = true;
3373 } else {
3374 /*
3375 * If the table doesn't have wired entries any longer
3376 * but still has unwired entries, put it back into
3377 * the available queue.
3378 */
3379 if (bt_wired && b_tbl->bt_wcnt == 0)
3380 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
3381
3382 empty = false;
3383 }
3384
3385 return empty;
3386 }
3387
3388 /* pmap_remove_c INTERNAL
3389 **
3390 * Remove a range of addresses from the given C table.
3391 */
3392 bool
3393 pmap_remove_c(c_tmgr_t *c_tbl, vaddr_t sva, vaddr_t eva)
3394 {
3395 bool empty;
3396 int idx;
3397 mmu_short_pte_t *c_pte;
3398 uint8_t ct_wired;
3399
3400 ct_wired = c_tbl->ct_wcnt;
3401
3402 idx = MMU_TIC(sva);
3403 c_pte = &c_tbl->ct_dtbl[idx];
3404 for (; sva < eva; sva += MMU_PAGE_SIZE, c_pte++) {
3405 if (MMU_VALID_DT(*c_pte)) {
3406 if (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)
3407 c_tbl->ct_wcnt--;
3408 pmap_remove_pte(c_pte);
3409 c_tbl->ct_ecnt--;
3410 }
3411 }
3412
3413 if (c_tbl->ct_ecnt == 0) {
3414 KASSERT(c_tbl->ct_wcnt == 0);
3415 c_tbl->ct_parent = NULL;
3416 if (!ct_wired)
3417 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
3418 TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
3419 empty = true;
3420 } else {
3421 /*
3422 * If the table doesn't have wired entries any longer
3423 * but still has unwired entries, put it back into
3424 * the available queue.
3425 */
3426 if (ct_wired && c_tbl->ct_wcnt == 0)
3427 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
3428 empty = false;
3429 }
3430
3431 return empty;
3432 }
3433
3434 /* pmap_bootstrap_alloc INTERNAL
3435 **
3436 * Used internally for memory allocation at startup when malloc is not
3437 * available. This code will fail once it crosses the first memory
3438 * bank boundary on the 3/80. Hopefully by then however, the VM system
3439 * will be in charge of allocation.
3440 */
3441 void *
3442 pmap_bootstrap_alloc(int size)
3443 {
3444 void *rtn;
3445
3446 #ifdef PMAP_DEBUG
3447 if (bootstrap_alloc_enabled == false) {
3448 mon_printf("pmap_bootstrap_alloc: disabled\n");
3449 sunmon_abort();
3450 }
3451 #endif
3452
3453 rtn = (void *) virtual_avail;
3454 virtual_avail += size;
3455
3456 #ifdef PMAP_DEBUG
3457 if (virtual_avail > virtual_contig_end) {
3458 mon_printf("pmap_bootstrap_alloc: out of mem\n");
3459 sunmon_abort();
3460 }
3461 #endif
3462
3463 return rtn;
3464 }
3465
3466 /* pmap_bootstrap_aalign INTERNAL
3467 **
3468 * Used to ensure that the next call to pmap_bootstrap_alloc() will
3469 * return a chunk of memory aligned to the specified size.
3470 *
3471 * Note: This function will only support alignment sizes that are powers
3472 * of two.
3473 */
3474 void
3475 pmap_bootstrap_aalign(int size)
3476 {
3477 int off;
3478
3479 off = virtual_avail & (size - 1);
3480 if (off) {
3481 (void)pmap_bootstrap_alloc(size - off);
3482 }
3483 }
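
/*
 * Example: the alignment above works by padding out virtual_avail.  If
 * virtual_avail were 0x12345 and an 0x1000-byte alignment were requested,
 * off = 0x345 and 0xcbb bytes would be consumed, so the next allocation
 * starts at 0x13000.  A sketch of the usual call sequence (the local
 * variable name is made up):
 */
#if 0	/* illustrative sketch only */
	void *tables;

	pmap_bootstrap_aalign(PAGE_SIZE);		/* page-align next chunk */
	tables = pmap_bootstrap_alloc(3 * PAGE_SIZE);	/* then take 3 pages */
#endif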
3484
3485 /* pmap_pa_exists
3486 **
3487 * Used by the /dev/mem driver to see if a given PA is memory
3488 * that can be mapped. (The PA is not in a hole.)
3489 */
3490 int
3491 pmap_pa_exists(paddr_t pa)
3492 {
3493 int i;
3494
3495 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3496 if ((pa >= avail_mem[i].pmem_start) &&
3497 (pa < avail_mem[i].pmem_end))
3498 return 1;
3499 if (avail_mem[i].pmem_next == NULL)
3500 break;
3501 }
3502 return 0;
3503 }
3504
3505 /* Called only from locore.s and pmap.c */
3506 void _pmap_switch(pmap_t pmap);
3507
3508 /*
3509 * _pmap_switch INTERNAL
3510 *
3511 * This is called by locore.s:cpu_switch() when it is
3512 * switching to a new process. Load new translations.
3513 * Note: done in-line by locore.s unless PMAP_DEBUG
3514 *
3515 * Note that we do NOT allocate a context here, but
3516 * share the "kernel only" context until we really
3517 * need our own context for user-space mappings in
3518 * pmap_enter_user(). [ s/context/mmu A table/ ]
3519 */
3520 void
3521 _pmap_switch(pmap_t pmap)
3522 {
3523 u_long rootpa;
3524
3525 /*
3526 * Only do reload/flush if we have to.
3527 * Note that if the old and new process
3528 * were BOTH using the "null" context,
3529 * then this will NOT flush the TLB.
3530 */
3531 rootpa = pmap->pm_a_phys;
3532 if (kernel_crp.rp_addr != rootpa) {
3533 DPRINT(("pmap_activate(%p)\n", pmap));
3534 kernel_crp.rp_addr = rootpa;
3535 loadcrp(&kernel_crp);
3536 TBIAU();
3537 }
3538 }
3539
3540 /*
3541 * Exported version of pmap_activate(). This is called from the
3542 * machine-independent VM code when a process is given a new pmap.
3543 * If (p == curlwp) do like cpu_switch would do; otherwise just
3544 * take this as notification that the process has a new pmap.
3545 */
3546 void
3547 pmap_activate(struct lwp *l)
3548 {
3549
3550 if (l->l_proc == curproc) {
3551 _pmap_switch(l->l_proc->p_vmspace->vm_map.pmap);
3552 }
3553 }
3554
3555 /*
3556 * pmap_deactivate INTERFACE
3557 **
3558 * This is called to deactivate the specified process's address space.
3559 */
3560 void
3561 pmap_deactivate(struct lwp *l)
3562 {
3563
3564 /* Nothing to do. */
3565 }
3566
3567 /*
3568 * Fill in the sun3x-specific part of the kernel core header
3569 * for dumpsys(). (See machdep.c for the rest.)
3570 */
3571 void
3572 pmap_kcore_hdr(struct sun3x_kcore_hdr *sh)
3573 {
3574 u_long spa, len;
3575 int i;
3576
3577 sh->pg_frame = MMU_SHORT_PTE_BASEADDR;
3578 sh->pg_valid = MMU_DT_PAGE;
3579 sh->contig_end = virtual_contig_end;
3580 sh->kernCbase = (u_long)kernCbase;
3581 for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3582 spa = avail_mem[i].pmem_start;
3583 spa = m68k_trunc_page(spa);
3584 len = avail_mem[i].pmem_end - spa;
3585 len = m68k_round_page(len);
3586 sh->ram_segs[i].start = spa;
3587 sh->ram_segs[i].size = len;
3588 }
3589 }


/* pmap_virtual_space			INTERFACE
 **
 * Return the current available range of virtual addresses in the
 * arguments provided.  Only really called once.
 */
void
pmap_virtual_space(vaddr_t *vstart, vaddr_t *vend)
{

	*vstart = virtual_avail;
	*vend = virtual_end;
}

/*
 * Provide memory to the VM system.
 *
 * Assume avail_start is always in the
 * first segment as pmap_bootstrap does.
 */
static void
pmap_page_upload(void)
{
	paddr_t	a, b;	/* memory range */
	int i;

	/* Supply the memory in segments. */
	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
		a = atop(avail_mem[i].pmem_start);
		b = atop(avail_mem[i].pmem_end);
		if (i == 0)
			a = atop(avail_start);
		if (avail_mem[i].pmem_end > avail_end)
			b = atop(avail_end);

		uvm_page_physload(a, b, a, b, VM_FREELIST_DEFAULT);

		if (avail_mem[i].pmem_next == NULL)
			break;
	}
}

/* pmap_count			INTERFACE
 **
 * Return the number of resident (valid) pages in the given pmap.
 *
 * Note:  If this function is handed the kernel map, it will report
 * that it has no mappings.  Hopefully the VM system won't ask for kernel
 * map statistics.
 */
segsz_t
pmap_count(pmap_t pmap, int type)
{
	u_int count;
	int a_idx, b_idx;
	a_tmgr_t *a_tbl;
	b_tmgr_t *b_tbl;
	c_tmgr_t *c_tbl;

	/*
	 * If the pmap does not have its own A table manager, it has no
	 * valid entries.
	 */
	if (pmap->pm_a_tmgr == NULL)
		return 0;

	a_tbl = pmap->pm_a_tmgr;

	count = 0;
	for (a_idx = 0; a_idx < MMU_TIA(KERNBASE3X); a_idx++) {
		if (MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
			b_tbl = mmuB2tmgr(mmu_ptov(a_tbl->at_dtbl[a_idx].addr.raw));
			for (b_idx = 0; b_idx < MMU_B_TBL_SIZE; b_idx++) {
				if (MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
					c_tbl = mmuC2tmgr(
					    mmu_ptov(MMU_DTE_PA(b_tbl->bt_dtbl[b_idx])));
					if (type == 0)
						/*
						 * A resident entry count has been requested.
						 */
						count += c_tbl->ct_ecnt;
					else
						/*
						 * A wired entry count has been requested.
						 */
						count += c_tbl->ct_wcnt;
				}
			}
		}
	}

	return count;
}
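
/*
 * Illustrative note: the 'type' argument selects between the resident
 * and wired entry counts.  The MI-facing macros in the machine-dependent
 * pmap header are presumably thin wrappers along these lines; this is a
 * sketch for orientation only, not copied from that header, and is
 * compiled out.
 */
#if 0
#define	pmap_resident_count(pmap)	pmap_count((pmap), 0)
#define	pmap_wired_count(pmap)		pmap_count((pmap), 1)
#endif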

/************************ SUN3 COMPATIBILITY ROUTINES ********************
 * The following routines are only used by DDB for tricky kernel text   *
 * operations in db_memrw.c.  They are provided for sun3 compatibility. *
 *************************************************************************/
/* get_pte			INTERNAL
 **
 * Return the page descriptor that describes the kernel mapping
 * of the given virtual address.
 */
extern u_long ptest_addr(u_long);	/* XXX: locore.s */
u_int
get_pte(vaddr_t va)
{
	u_long pte_pa;
	mmu_short_pte_t *pte;

	/* Get the physical address of the PTE */
	pte_pa = ptest_addr(va & ~PGOFSET);

	/* Convert to a virtual address... */
	pte = (mmu_short_pte_t *) (KERNBASE3X + pte_pa);

	/* Make sure it is in our level-C tables... */
	if ((pte < kernCbase) ||
	    (pte >= &mmuCbase[NUM_USER_PTES]))
		return 0;

	/* ... and just return its contents. */
	return (pte->attr.raw);
}


/* set_pte			INTERNAL
 **
 * Set the page descriptor that describes the kernel mapping
 * of the given virtual address.
 */
void
set_pte(vaddr_t va, u_int pte)
{
	u_long idx;

	if (va < KERNBASE3X)
		return;

	idx = (unsigned long) m68k_btop(va - KERNBASE3X);
	kernCbase[idx].attr.raw = pte;
	TBIS(va);
}
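
/*
 * Illustrative sketch of how a DDB-style caller (cf. db_memrw.c) can use
 * the two helpers above to patch write-protected kernel text: fetch the
 * PTE, clear its write-protect bit, write the byte, then restore the
 * original PTE.  The bit name MMU_SHORT_PTE_WP and the function name
 * example_db_write_text() are assumptions made for this sketch only;
 * the code is compiled out.
 */
#if 0
static void
example_db_write_text(vaddr_t va, char val)
{
	u_int oldpte;

	oldpte = get_pte(va);
	set_pte(va, oldpte & ~MMU_SHORT_PTE_WP);	/* allow writes */
	*(char *)va = val;
	set_pte(va, oldpte);				/* restore protection */
}
#endif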

/*
 * Routine:	pmap_procwr
 *
 * Function:
 *	Synchronize caches corresponding to [addr, addr+len) in p.
 */
void
pmap_procwr(struct proc *p, vaddr_t va, size_t len)
{

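	/*
	 * The magic constant is apparently CC_EXTPURGE | CC_IPURGE
	 * (purge the instruction cache, external caches included) so
	 * that freshly written instructions are seen by the CPU.
	 */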
	(void)cachectl1(0x80000004, va, len, p);
}


#ifdef	PMAP_DEBUG
/************************** DEBUGGING ROUTINES **************************
 * The following routines are meant to be an aid to debugging the pmap  *
 * system.  They are callable from the DDB command line and should be   *
 * prepared to be handed unstable or incomplete states of the system.   *
 ************************************************************************/

/* pv_list
 **
 * List all pages found on the pv list for the given physical page.
 * To avoid endless loops, the listing will stop at the end of the list
 * or after 'n' entries - whichever comes first.
 */
void
pv_list(paddr_t pa, int n)
{
	int idx;
	vaddr_t va;
	pv_t *pv;
	c_tmgr_t *c_tbl;
	pmap_t pmap;

	pv = pa2pv(pa);
	idx = pv->pv_idx;
	for (; idx != PVE_EOL && n > 0; idx = pvebase[idx].pve_next, n--) {
		va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
		printf("idx %d, pmap 0x%x, va 0x%x, c_tbl %x\n",
		    idx, (u_int) pmap, (u_int) va, (u_int) c_tbl);
	}
}
#endif	/* PMAP_DEBUG */

#ifdef NOT_YET
/* and maybe not ever */
/************************** LOW-LEVEL ROUTINES **************************
 * These routines will eventually be re-written into assembly and placed*
 * in locore.s.  They are here now as stubs so that the pmap module can *
 * be linked as a standalone user program for testing.                  *
 ************************************************************************/
/* flush_atc_crp			INTERNAL
 **
 * Flush all page descriptors derived from the given CPU Root Pointer
 * (CRP), or 'A' table as it is known here, from the 68851's automatic
 * cache.
 */
void
flush_atc_crp(int a_tbl)
{
	mmu_long_rp_t rp;

	/* Create a temporary root table pointer that points to the
	 * given A table.
	 */
	rp.attr.raw = ~MMU_LONG_RP_LU;
	rp.addr.raw = (unsigned int) a_tbl;

	mmu_pflushr(&rp);
	/* mmu_pflushr:
	 *	movel	sp(4)@,a0
	 *	pflushr	a0@
	 *	rts
	 */
}
#endif	/* NOT_YET */