1 /* $OpenBSD: pmap.c,v 1.88 2024/02/14 06:16:53 miod Exp $ */
2
3 /*
4 * Copyright (c) 2001-2004, 2010, Miodrag Vallat.
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 /*
19 * Copyright (c) 1998-2001 Steve Murphree, Jr.
20 * Copyright (c) 1996 Nivas Madhur
21 * All rights reserved.
22 *
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
25 * are met:
26 * 1. Redistributions of source code must retain the above copyright
27 * notice, this list of conditions and the following disclaimer.
28 * 2. Redistributions in binary form must reproduce the above copyright
29 * notice, this list of conditions and the following disclaimer in the
30 * documentation and/or other materials provided with the distribution.
31 * 3. All advertising materials mentioning features or use of this software
32 * must display the following acknowledgement:
33 * This product includes software developed by Nivas Madhur.
34 * 4. The name of the author may not be used to endorse or promote products
35 * derived from this software without specific prior written permission
36 *
37 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
38 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
39 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
40 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
41 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
42 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
43 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
44 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
45 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
46 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
47 *
48 */
49 /*
50 * Mach Operating System
51 * Copyright (c) 1991 Carnegie Mellon University
52 * Copyright (c) 1991 OMRON Corporation
53 * All Rights Reserved.
54 *
55 * Permission to use, copy, modify and distribute this software and its
56 * documentation is hereby granted, provided that both the copyright
57 * notice and this permission notice appear in all copies of the
58 * software, derivative works or modified versions, and any portions
59 * thereof, and that both notices appear in supporting documentation.
60 *
61 */
62
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/proc.h>
66 #include <sys/pool.h>
67
68 #include <uvm/uvm.h>
69
70 #include <machine/asm_macro.h>
71 #include <machine/cmmu.h>
72 #include <machine/cpu.h>
73 #include <machine/pmap_table.h>
74 #ifdef M88100
75 #include <machine/m8820x.h>
76 #endif
77 #ifdef M88110
78 #include <machine/m88110.h>
79 #endif
80
81 /*
82 * VM externals
83 */
84 extern paddr_t last_addr;
85 vaddr_t avail_start;
86 vaddr_t avail_end;
87 vaddr_t virtual_avail = VM_MIN_KERNEL_ADDRESS;
88 vaddr_t virtual_end = VM_MAX_KERNEL_ADDRESS;
89
90
91 #ifdef PMAPDEBUG
92 /*
93 * conditional debugging
94 */
95 #define CD_ACTIVATE 0x00000001 /* pmap_activate */
96 #define CD_KMAP 0x00000002 /* pmap_expand_kmap */
97 #define CD_MAP 0x00000004 /* pmap_map */
98 #define CD_CACHE 0x00000008 /* pmap_cache_ctrl */
99 #define CD_INIT 0x00000010 /* pmap_init */
100 #define CD_CREAT 0x00000020 /* pmap_create */
101 #define CD_DESTR 0x00000040 /* pmap_destroy */
102 #define CD_RM 0x00000080 /* pmap_remove / pmap_kremove */
103 #define CD_RMPG 0x00000100 /* pmap_remove_page */
104 #define CD_EXP 0x00000200 /* pmap_expand */
105 #define CD_ENT 0x00000400 /* pmap_enter / pmap_kenter_pa */
106 #define CD_COL 0x00000800 /* pmap_collect */
107 #define CD_CBIT 0x00001000 /* pmap_changebit */
108 #define CD_TBIT 0x00002000 /* pmap_testbit */
109 #define CD_USBIT 0x00004000 /* pmap_unsetbit */
110 #define CD_COPY 0x00008000 /* pmap_copy_page */
111 #define CD_ZERO 0x00010000 /* pmap_zero_page */
112 #define CD_BOOT 0x00020000 /* pmap_bootstrap */
113 #define CD_ALL 0xffffffff
114
115 int pmap_debug = CD_BOOT | CD_KMAP | CD_MAP;
116
117 #define DPRINTF(flg, stmt) \
118 do { \
119 if (pmap_debug & (flg)) \
120 printf stmt; \
121 } while (0)
122
123 #else
124
125 #define DPRINTF(flg, stmt) do { } while (0)
126
127 #endif /* PMAPDEBUG */
128
129 struct pool pmappool, pvpool;
130 struct pmap kernel_pmap_store;
131
132 /*
133 * Cacheability settings for page tables and kernel data.
134 */
135
136 apr_t pte_cmode = CACHE_WT;
137 apr_t kernel_apr = CACHE_GLOBAL | CACHE_DFL | APR_V;
138 apr_t userland_apr = CACHE_GLOBAL | CACHE_DFL | APR_V;
139
140 #define KERNEL_APR_CMODE (kernel_apr & (CACHE_MASK & ~CACHE_GLOBAL))
141 #define USERLAND_APR_CMODE (userland_apr & (CACHE_MASK & ~CACHE_GLOBAL))
142
143 /*
144 * Address and size of the temporary firmware mapping
145 */
146 paddr_t s_firmware;
147 psize_t l_firmware;
148
149 /*
150 * Current BATC values.
151 */
152
153 batc_t global_dbatc[BATC_MAX];
154 batc_t global_ibatc[BATC_MAX];
155
156 /*
157 * Internal routines
158 */
159 void pmap_changebit(struct vm_page *, int, int);
160 void pmap_clean_page(paddr_t);
161 pt_entry_t *pmap_expand(pmap_t, vaddr_t, int);
162 pt_entry_t *pmap_expand_kmap(vaddr_t, int);
163 void pmap_map(paddr_t, psize_t, vm_prot_t, u_int, boolean_t);
164 pt_entry_t *pmap_pte(pmap_t, vaddr_t);
165 void pmap_remove_page(struct vm_page *);
166 void pmap_remove_pte(pmap_t, vaddr_t, pt_entry_t *,
167 struct vm_page *, boolean_t);
168 void pmap_remove_range(pmap_t, vaddr_t, vaddr_t);
169 boolean_t pmap_testbit(struct vm_page *, int);
170
171 static __inline pv_entry_t
172 pg_to_pvh(struct vm_page *pg)
173 {
174 return &pg->mdpage.pv_ent;
175 }
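/*
 * Illustrative sketch, not part of the original file: walk the pv list of
 * a managed page and count its mappings.  The head pv_entry is embedded in
 * the vm_page itself (pv_pmap == NULL meaning "no mapping at all"); further
 * mappings are chained through pool-allocated entries.  The function name
 * and the PMAP_EXAMPLE guard are hypothetical; the guard only keeps the
 * sketch out of the build.
 */
#ifdef PMAP_EXAMPLE
static u_int
pmap_example_count_mappings(struct vm_page *pg)
{
	pv_entry_t pvep = pg_to_pvh(pg);
	u_int count = 0;

	if (pvep->pv_pmap == NULL)
		return 0;			/* page is not mapped at all */
	for (; pvep != NULL; pvep = pvep->pv_next)
		count++;			/* head + chained entries */
	return count;
}
#endif /* PMAP_EXAMPLE */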
176
177 /*
178 * PTE routines
179 */
180
181 #define m88k_protection(prot) ((prot) & PROT_WRITE ? PG_RW : PG_RO)
182 #define pmap_pte_w(pte) (*(pte) & PG_W)
183
184 #define SDTENT(pm, va) ((pm)->pm_stab + SDTIDX(va))
185
186 /*
187 * [INTERNAL]
188 * Return the address of the pte for `va' within the page table pointed
189 * to by the segment table entry `sdt'. Assumes *sdt is a valid segment
190 * table entry.
191 */
192 static __inline__
193 pt_entry_t *
194 sdt_pte(sdt_entry_t *sdt, vaddr_t va)
195 {
196 return (pt_entry_t *)(*sdt & PG_FRAME) + PDTIDX(va);
197 }
198
199 /*
200 * [INTERNAL]
201 * Return the address of the pte for `va' in `pmap'. NULL if there is no
202 * page table for `va'.
203 */
204 pt_entry_t *
205 pmap_pte(pmap_t pmap, vaddr_t va)
206 {
207 sdt_entry_t *sdt;
208
209 sdt = SDTENT(pmap, va);
210 if (!SDT_VALID(sdt))
211 return NULL;
212
213 return sdt_pte(sdt, va);
214 }
215
216 /*
217 * [MI]
218 * Checks how virtual address `va' would translate with `pmap' as the active
219 * pmap. Returns TRUE and matching physical address in `pap' (if not NULL) if
220 * translation is possible, FALSE otherwise.
221 */
222 boolean_t
223 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
224 {
225 paddr_t pa;
226 uint32_t ti;
227 int rv;
228
229 rv = pmap_translation_info(pmap, va, &pa, &ti);
230 if (rv == PTI_INVALID)
231 return FALSE;
232 else {
233 if (pap != NULL)
234 *pap = pa;
235 return TRUE;
236 }
237 }
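/*
 * Illustrative sketch, not part of the original file: typical use of
 * pmap_extract() by machine-dependent code to find the physical page
 * backing a kernel virtual address.  The function name, the error value
 * and the PMAP_EXAMPLE guard are hypothetical.
 */
#ifdef PMAP_EXAMPLE
static paddr_t
pmap_example_kvtop(vaddr_t va)
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		return (paddr_t)-1;		/* no valid translation */
	return pa;
}
#endif /* PMAP_EXAMPLE */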
238
239 /*
240 * [MD PUBLIC]
241 * Checks how virtual address `va' would translate with `pmap' as the active
242 * pmap. Returns a PTI_xxx constant indicating which translation hardware
243 * would perform the translation; if not PTI_INVALID, the matching physical
244 * address is returned into `pap', and cacheability of the mapping is
245 * returned into `ti'.
246 */
247 int
248 pmap_translation_info(pmap_t pmap, vaddr_t va, paddr_t *pap, uint32_t *ti)
249 {
250 pt_entry_t *pte;
251 vaddr_t var;
252 uint batcno;
253 int s;
254 int rv;
255
256 /*
257 * Check for a BATC translation first.
258 * We only use BATC for supervisor mappings (i.e. pmap_kernel()).
259 */
260
261 if (pmap == pmap_kernel()) {
262 /*
263 * 88100-based designs (with 8820x CMMUs) have two hardwired
264 * BATC entries which map the upper 1MB (so-called
265 * `utility space') 1:1 in supervisor space.
266 */
267 #ifdef M88100
268 if (CPU_IS88100) {
269 if (va >= BATC9_VA) {
270 *pap = va;
271 *ti = 0;
272 if (BATC9 & BATC_INH)
273 *ti |= CACHE_INH;
274 if (BATC9 & BATC_GLOBAL)
275 *ti |= CACHE_GLOBAL;
276 if (BATC9 & BATC_WT)
277 *ti |= CACHE_WT;
278 return PTI_BATC;
279 }
280 if (va >= BATC8_VA) {
281 *pap = va;
282 *ti = 0;
283 if (BATC8 & BATC_INH)
284 *ti |= CACHE_INH;
285 if (BATC8 & BATC_GLOBAL)
286 *ti |= CACHE_GLOBAL;
287 if (BATC8 & BATC_WT)
288 *ti |= CACHE_WT;
289 return PTI_BATC;
290 }
291 }
292 #endif
293
294 /*
295 * Now try all DBATC entries.
296 * Note that pmap_translation_info() might be invoked (via
297 * pmap_extract() ) for instruction faults; we *rely* upon
298 * the fact that all executable mappings covered by IBATC
299 * will be:
300 * - read-only, with no RO->RW upgrade allowed
301 * - dual mapped by ptes, so that pmap_extract() can still
302 * return a meaningful result.
303 * Should this ever change, some kernel interfaces will need
304 * to be made aware of (and carry on to callees) whether the
305 * address should be resolved as an instruction or data
306 * address.
307 */
308 var = trunc_batc(va);
309 for (batcno = 0; batcno < BATC_MAX; batcno++) {
310 vaddr_t batcva;
311 paddr_t batcpa;
312 batc_t batc;
313
314 batc = global_dbatc[batcno];
315 if ((batc & BATC_V) == 0)
316 continue;
317
318 batcva = (batc << (BATC_BLKSHIFT - BATC_VSHIFT)) &
319 ~BATC_BLKMASK;
320 if (batcva == var) {
321 batcpa = (batc <<
322 (BATC_BLKSHIFT - BATC_PSHIFT)) &
323 ~BATC_BLKMASK;
324 *pap = batcpa + (va - var);
325 *ti = 0;
326 if (batc & BATC_INH)
327 *ti |= CACHE_INH;
328 if (batc & BATC_GLOBAL)
329 *ti |= CACHE_GLOBAL;
330 if (batc & BATC_WT)
331 *ti |= CACHE_WT;
332 return PTI_BATC;
333 }
334 }
335 }
336
337 /*
338 * Check for a regular PTE translation.
339 */
340
341 s = splvm();
342 pte = pmap_pte(pmap, va);
343 if (pte != NULL && PDT_VALID(pte)) {
344 *pap = ptoa(PG_PFNUM(*pte)) | (va & PAGE_MASK);
345 *ti = (*pte | pmap->pm_apr) & CACHE_MASK;
346 rv = PTI_PTE;
347 } else
348 rv = PTI_INVALID;
349
350 splx(s);
351
352 return rv;
353 }
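/*
 * Illustrative sketch, not part of the original file: decode a DBATC word
 * into the virtual and physical block addresses it maps, mirroring the
 * shifts performed in pmap_translation_info() above (the block numbers are
 * stored at BATC_VSHIFT/BATC_PSHIFT and scale by BATC_BLKSHIFT).  The
 * function name and the PMAP_EXAMPLE guard are hypothetical.
 */
#ifdef PMAP_EXAMPLE
static void
pmap_example_batc_decode(batc_t batc, vaddr_t *vap, paddr_t *pap)
{
	*vap = (batc << (BATC_BLKSHIFT - BATC_VSHIFT)) & ~BATC_BLKMASK;
	*pap = (batc << (BATC_BLKSHIFT - BATC_PSHIFT)) & ~BATC_BLKMASK;
}
#endif /* PMAP_EXAMPLE */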
354
355 /*
356 * TLB (ATC) routines
357 */
358
359 void tlb_flush(pmap_t, vaddr_t, pt_entry_t);
360 void tlb_kflush(vaddr_t, pt_entry_t);
361
362 /*
363 * [INTERNAL]
364 * Update translation cache entry for `va' in `pmap' to `pte'. May flush
365 * instead of updating.
366 */
367 void
368 tlb_flush(pmap_t pmap, vaddr_t va, pt_entry_t pte)
369 {
370 struct cpu_info *ci;
371 boolean_t kernel = pmap == pmap_kernel();
372 #ifdef MULTIPROCESSOR
373 CPU_INFO_ITERATOR cpu;
374 #endif
375
376 #ifdef MULTIPROCESSOR
377 CPU_INFO_FOREACH(cpu, ci)
378 #else
379 ci = curcpu();
380 #endif
381 {
382 if (kernel)
383 cmmu_tlbis(ci->ci_cpuid, va, pte);
384 else if (pmap == ci->ci_curpmap)
385 cmmu_tlbiu(ci->ci_cpuid, va, pte);
386 }
387 }
388
389 /*
390 * [INTERNAL]
391 * Update translation cache entry for `va' in pmap_kernel() to `pte'. May
392 * flush instead of updating.
393 */
394 void
395 tlb_kflush(vaddr_t va, pt_entry_t pte)
396 {
397 struct cpu_info *ci;
398 #ifdef MULTIPROCESSOR
399 CPU_INFO_ITERATOR cpu;
400 #endif
401
402 #ifdef MULTIPROCESSOR /* { */
403 CPU_INFO_FOREACH(cpu, ci) {
404 cmmu_tlbis(ci->ci_cpuid, va, pte);
405 }
406 #else /* MULTIPROCESSOR */ /* } { */
407 ci = curcpu();
408 cmmu_tlbis(ci->ci_cpuid, va, pte);
409 #endif /* MULTIPROCESSOR */ /* } */
410 }
411
412 /*
413 * [MI]
414 * Activate the pmap of process `p'.
415 */
416 void
417 pmap_activate(struct proc *p)
418 {
419 pmap_t pmap = vm_map_pmap(&p->p_vmspace->vm_map);
420 struct cpu_info *ci = curcpu();
421
422 DPRINTF(CD_ACTIVATE, ("pmap_activate(%p) pmap %p\n", p, pmap));
423
424 if (pmap == pmap_kernel()) {
425 ci->ci_curpmap = NULL;
426 } else {
427 if (pmap != ci->ci_curpmap) {
428 cmmu_set_uapr(pmap->pm_apr);
429 cmmu_tlbia(ci->ci_cpuid);
430 ci->ci_curpmap = pmap;
431 }
432 }
433 }
434
435 /*
436 * [MI]
437 * Deactivates the pmap of process `p'.
438 */
439 void
440 pmap_deactivate(struct proc *p)
441 {
442 struct cpu_info *ci = curcpu();
443
444 ci->ci_curpmap = NULL;
445 }
446
447 /*
448 * Segment and page table management routines
449 */
450
451 /*
452 * [INTERNAL]
453 * Expand pmap_kernel() to be able to map a page at `va', by allocating
454 * a page table. Returns a pointer to the pte of this page, or NULL
455 * if allocation failed and `canfail' is nonzero. Panics if allocation
456 * fails and `canfail' is zero.
457 * Caller is supposed to only invoke this function if
458 * pmap_pte(pmap_kernel(), va) returns NULL.
459 */
460 pt_entry_t *
461 pmap_expand_kmap(vaddr_t va, int canfail)
462 {
463 sdt_entry_t *sdt;
464 struct vm_page *pg;
465 paddr_t pa;
466
467 DPRINTF(CD_KMAP, ("pmap_expand_kmap(%lx, %d)\n", va, canfail));
468
469 if (__predict_true(uvm.page_init_done)) {
470 pg = uvm_pagealloc(NULL, 0, NULL,
471 (canfail ? 0 : UVM_PGA_USERESERVE) | UVM_PGA_ZERO);
472 if (pg == NULL) {
473 if (canfail)
474 return NULL;
475 panic("pmap_expand_kmap(%p): uvm_pagealloc() failed",
476 (void *)va);
477 }
478 pa = VM_PAGE_TO_PHYS(pg);
479 } else {
480 pa = (paddr_t)uvm_pageboot_alloc(PAGE_SIZE);
481 if (pa == 0)
482 panic("pmap_expand_kmap(%p): uvm_pageboot_alloc() failed",
483 (void *)va);
484 bzero((void *)pa, PAGE_SIZE);
485 }
486
487 pmap_cache_ctrl(pa, pa + PAGE_SIZE, pte_cmode);
488 sdt = SDTENT(pmap_kernel(), va);
489 *sdt = pa | SG_SO | SG_RW | PG_M | SG_V;
490 return sdt_pte(sdt, va);
491 }
492
493 /*
494 * [INTERNAL]
495 * Expand `pmap' to be able to map a page at `va', by allocating
496 * a page table. Returns a pointer to the pte of this page, or NULL
497 * if allocation failed and `canfail' is nonzero. Waits until memory is
498 * available if allocation fails and `canfail' is zero.
499 * Caller is supposed to only invoke this function if
500 * pmap_pte(pmap, va) returns NULL.
501 */
502 pt_entry_t *
503 pmap_expand(pmap_t pmap, vaddr_t va, int canfail)
504 {
505 struct vm_page *pg;
506 paddr_t pa;
507 sdt_entry_t *sdt;
508
509 DPRINTF(CD_EXP, ("pmap_expand(%p, %lx, %d)\n", pmap, va, canfail));
510
511 sdt = SDTENT(pmap, va);
512 for (;;) {
513 pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
514 if (pg != NULL)
515 break;
516 if (canfail)
517 return NULL;
518 uvm_wait(__func__);
519 }
520
521 pa = VM_PAGE_TO_PHYS(pg);
522 pmap_cache_ctrl(pa, pa + PAGE_SIZE, pte_cmode);
523
524 *sdt = pa | SG_RW | PG_M | SG_V;
525
526 return sdt_pte(sdt, va);
527 }
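/*
 * Illustrative sketch, not part of the original file: the canonical caller
 * pattern for the expand routines, as used by pmap_map(), pmap_enter() and
 * pmap_kenter_pa() below - expand only once pmap_pte() reports that no
 * page table covers the address.  The function name and the PMAP_EXAMPLE
 * guard are hypothetical.
 */
#ifdef PMAP_EXAMPLE
static pt_entry_t *
pmap_example_pte_alloc(pmap_t pmap, vaddr_t va)
{
	pt_entry_t *pte;

	if ((pte = pmap_pte(pmap, va)) != NULL)
		return pte;			/* page table already there */
	if (pmap == pmap_kernel())
		return pmap_expand_kmap(va, 1);	/* may return NULL */
	return pmap_expand(pmap, va, 1);	/* may return NULL */
}
#endif /* PMAP_EXAMPLE */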
528
529 /*
530 * Bootstrap routines
531 */
532
533 /*
534 * [MI]
535 * Early allocation, directly from the vm_physseg ranges of managed pages
536 * passed to UVM. Pages ``stolen'' by this routine will never be seen as
537 * managed pages and will not have vm_page structs created for them,
538 */
539 vaddr_t
540 pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
541 {
542 vaddr_t va;
543 u_int npg;
544
545 size = round_page(size);
546 npg = atop(size);
547
548 /* m88k systems only have one segment. */
549 #ifdef DIAGNOSTIC
550 if (vm_physmem[0].avail_end - vm_physmem[0].avail_start < npg)
551 panic("pmap_steal_memory(%lx): out of memory", size);
552 #endif
553
554 va = ptoa(vm_physmem[0].avail_start);
555 vm_physmem[0].avail_start += npg;
556 vm_physmem[0].start += npg;
557
558 if (vstartp != NULL)
559 *vstartp = virtual_avail;
560 if (vendp != NULL)
561 *vendp = virtual_end;
562
563 bzero((void *)va, size);
564 return (va);
565 }
566
567 /*
568 * [INTERNAL]
569 * Setup a wired mapping in pmap_kernel(). Similar to pmap_kenter_pa(),
570 * but allows explicit cacheability control.
571 * This is only used at bootstrap time. Mappings may also be backed up
572 * by a BATC entry if requested and possible; but note that the BATC
573 * entries set up here may be overwritten by cmmu_batc_setup() later on
574 * (which is harmless since we are creating proper ptes anyway).
575 */
576 void
577 pmap_map(paddr_t pa, psize_t sz, vm_prot_t prot, u_int cmode,
578 boolean_t may_use_batc)
579 {
580 pt_entry_t *pte, npte;
581 batc_t batc;
582 uint npg, batcno;
583 paddr_t curpa;
584
585 DPRINTF(CD_MAP, ("pmap_map(%lx, %lx, %x, %x)\n",
586 pa, sz, prot, cmode));
587 #ifdef DIAGNOSTIC
588 if (pa != 0 && pa < VM_MAX_KERNEL_ADDRESS)
589 panic("pmap_map: virtual range %p-%p overlaps KVM",
590 (void *)pa, (void *)(pa + sz));
591 #endif
592
593 sz = round_page(pa + sz) - trunc_page(pa);
594 pa = trunc_page(pa);
595
596 npte = m88k_protection(prot) | cmode | PG_W | PG_V;
597 #ifdef M88110
598 if (CPU_IS88110 && m88k_protection(prot) != PG_RO)
599 npte |= PG_M;
600 #endif
601
602 npg = atop(sz);
603 curpa = pa;
604 while (npg-- != 0) {
605 if ((pte = pmap_pte(pmap_kernel(), curpa)) == NULL)
606 pte = pmap_expand_kmap(curpa, 0);
607
608 *pte = npte | curpa;
609 curpa += PAGE_SIZE;
610 pmap_kernel()->pm_stats.resident_count++;
611 pmap_kernel()->pm_stats.wired_count++;
612 }
613
614 if (may_use_batc) {
615 sz = round_batc(pa + sz) - trunc_batc(pa);
616 pa = trunc_batc(pa);
617
618 batc = BATC_SO | BATC_V;
619 if ((prot & PROT_WRITE) == 0)
620 batc |= BATC_PROT;
621 if (cmode & CACHE_INH)
622 batc |= BATC_INH;
623 if (cmode & CACHE_WT)
624 batc |= BATC_WT;
625 batc |= BATC_GLOBAL; /* XXX 88110 SP */
626
627 for (; sz != 0; sz -= BATC_BLKBYTES, pa += BATC_BLKBYTES) {
628 /* check if an existing BATC covers this area */
629 for (batcno = 0; batcno < BATC_MAX; batcno++) {
630 if ((global_dbatc[batcno] & BATC_V) == 0)
631 continue;
632 curpa = (global_dbatc[batcno] <<
633 (BATC_BLKSHIFT - BATC_PSHIFT)) &
634 ~BATC_BLKMASK;
635 if (curpa == pa)
636 break;
637 }
638
639 /*
640 * If there is a BATC covering this range, reuse it.
641 * We assume all BATC-possible mappings will use the
642 * same protection and cacheability settings.
643 */
644 if (batcno != BATC_MAX)
645 continue;
646
647 /* create a new DBATC if possible */
648 for (batcno = BATC_MAX; batcno != 0; batcno--) {
649 if (global_dbatc[batcno - 1] & BATC_V)
650 continue;
651 global_dbatc[batcno - 1] = batc |
652 ((pa >> BATC_BLKSHIFT) << BATC_PSHIFT) |
653 ((pa >> BATC_BLKSHIFT) << BATC_VSHIFT);
654 break;
655 }
656 }
657 }
658 }
659
660 /*
661 * [MD]
662 * Initialize kernel translation tables.
663 */
664 void
665 pmap_bootstrap(paddr_t s_rom, paddr_t e_rom)
666 {
667 paddr_t s_low, s_text, e_rodata;
668 unsigned int npdtpg, nsdt, npdt;
669 unsigned int i;
670 sdt_entry_t *sdt;
671 pt_entry_t *pte, template;
672 paddr_t pa, sdtpa, ptepa;
673 const struct pmap_table *ptable;
674 extern void *kernelstart;
675 extern void *erodata;
676
677 virtual_avail = (vaddr_t)avail_end;
678
679 s_text = trunc_page((vaddr_t)&kernelstart);
680 e_rodata = round_page((vaddr_t)&erodata);
681
682 /*
683 * Reserve space for 1:1 memory mapping in supervisor space.
684 * We need:
685 * - roundup(avail_end, SDT_SIZE) / SDT_SIZE segment tables;
686 * these will fit in one page.
687 * - roundup(avail_end, PDT_SIZE) / PDT_SIZE page tables;
688 * these will span several pages.
689 */
690
691 nsdt = roundup(avail_end, (1 << SDT_SHIFT)) >> SDT_SHIFT;
692 npdt = roundup(avail_end, (1 << PDT_SHIFT)) >> PDT_SHIFT;
693 DPRINTF(CD_BOOT, ("avail_end %08lx pages %08lx nsdt %08x npdt %08x\n",
694 avail_end, atop(avail_end), nsdt, npdt));
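	/*
	 * Worked example (illustrative only, assuming the usual m88k layout
	 * of 4KB pages, 32-bit ptes and 4MB mapped per segment table entry):
	 * with avail_end = 128MB, nsdt = 128MB / 4MB = 32 segment entries
	 * (well within one page), and npdt = 128MB / 4KB = 32768 ptes, i.e.
	 * 32768 * 4 bytes = 32 pages worth of page tables.
	 */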
695
696 /*
697 * Since page tables may need specific cacheability settings,
698 * we need to make sure they will not end up in the BATC
699 * mapping the end of the kernel data.
700 *
701 * The CMMU initialization code will try, whenever possible, to
702 * setup 512KB BATC entries to map the kernel text and data,
703 * therefore platform-specific code is expected to register a
704 * non-overlapping range of pages (so that their cacheability
705 * can be controlled at the PTE level).
706 *
707 * If there is enough room between the firmware image and the
708 * beginning of the BATC-mapped region, we will setup the
709 * initial page tables there (and actually try to setup as many
710 * second level pages as possible, since this memory is not
711 * given to the VM system).
712 */
713
714 npdtpg = atop(round_page(npdt * sizeof(pt_entry_t)));
715 s_low = trunc_batc(s_text);
716
717 if (e_rom == 0)
718 s_rom = e_rom = PAGE_SIZE;
719 DPRINTF(CD_BOOT, ("nsdt %d npdt %d npdtpg %d\n", nsdt, npdt, npdtpg));
720 DPRINTF(CD_BOOT, ("area below the kernel %lx-%lx: %ld pages, need %d\n",
721 e_rom, s_low, atop(s_low - e_rom), npdtpg + 1));
722 if (e_rom < s_low && npdtpg + 1 <= atop(s_low - e_rom)) {
723 sdtpa = e_rom;
724 ptepa = sdtpa + PAGE_SIZE;
725 } else {
726 sdtpa = (paddr_t)uvm_pageboot_alloc(PAGE_SIZE);
727 ptepa = (paddr_t)uvm_pageboot_alloc(ptoa(npdtpg));
728 }
729
730 sdt = (sdt_entry_t *)sdtpa;
731 pte = (pt_entry_t *)ptepa;
732 pmap_kernel()->pm_stab = sdt;
733
734 DPRINTF(CD_BOOT, ("kernel sdt %p", sdt));
735 pa = ptepa;
736 for (i = nsdt; i != 0; i--) {
737 *sdt++ = pa | SG_SO | SG_RW | PG_M | SG_V;
738 pa += PAGE_SIZE;
739 }
740 DPRINTF(CD_BOOT, ("-%p\n", sdt));
741 for (i = (PAGE_SIZE / sizeof(sdt_entry_t)) - nsdt; i != 0; i--)
742 *sdt++ = SG_NV;
743 KDASSERT((vaddr_t)sdt == ptepa);
744
745 DPRINTF(CD_BOOT, ("kernel pte %p", pte));
746 /* memory below the kernel image */
747 for (i = atop(s_text); i != 0; i--)
748 *pte++ = PG_NV;
749 /* kernel text and rodata */
750 pa = s_text;
751 for (i = atop(e_rodata) - atop(pa); i != 0; i--) {
752 *pte++ = pa | PG_SO | PG_RO | PG_W | PG_V;
753 pa += PAGE_SIZE;
754 }
755 /* kernel data and symbols */
756 for (i = atop(avail_start) - atop(pa); i != 0; i--) {
757 #ifdef MULTIPROCESSOR
758 *pte++ = pa | PG_SO | PG_RW | PG_M_U | PG_W | PG_V | CACHE_WT;
759 #else
760 *pte++ = pa | PG_SO | PG_RW | PG_M_U | PG_W | PG_V;
761 #endif
762 pa += PAGE_SIZE;
763 }
764 /* regular memory */
765 for (i = atop(avail_end) - atop(pa); i != 0; i--) {
766 *pte++ = pa | PG_SO | PG_RW | PG_M_U | PG_V;
767 pa += PAGE_SIZE;
768 }
769 DPRINTF(CD_BOOT, ("-%p, pa %08lx\n", pte, pa));
770 for (i = (pt_entry_t *)round_page((vaddr_t)pte) - pte; i != 0; i--)
771 *pte++ = PG_NV;
772
773 /* kernel page tables */
774 pte_cmode = cmmu_pte_cmode();
775 template = PG_SO | PG_RW | PG_M_U | PG_W | PG_V | pte_cmode;
776 pa = sdtpa;
777 pte = (pt_entry_t *)ptepa + atop(pa);
778 for (i = 1 + npdtpg; i != 0; i--) {
779 *pte++ = pa | template;
780 pa += PAGE_SIZE;
781 }
782
783 /*
784 * Create all the machine-specific mappings.
785 * XXX This should eventually get done in machdep.c instead of here;
786 * XXX and on a driver basis on luna88k... If only to be able to grow
787 * XXX VM_MAX_KERNEL_ADDRESS.
788 */
789
790 if (e_rom != s_rom) {
791 s_firmware = s_rom;
792 l_firmware = e_rom - s_rom;
793 pmap_map(s_firmware, l_firmware, PROT_READ | PROT_WRITE,
794 CACHE_INH, FALSE);
795 }
796
797 for (ptable = pmap_table_build(); ptable->size != (vsize_t)-1; ptable++)
798 if (ptable->size != 0)
799 pmap_map(ptable->start, ptable->size,
800 ptable->prot, ptable->cacheability,
801 ptable->may_use_batc);
802
803 /*
804 * Adjust cache settings according to the hardware we are running on.
805 */
806
807 kernel_apr = (kernel_apr & ~(CACHE_MASK & ~CACHE_GLOBAL)) |
808 cmmu_apr_cmode();
809 #if defined(M88110) && !defined(MULTIPROCESSOR)
810 if (CPU_IS88110)
811 kernel_apr &= ~CACHE_GLOBAL;
812 #endif
813 userland_apr = (userland_apr & ~CACHE_MASK) | (kernel_apr & CACHE_MASK);
814
815 /*
816 * Switch to using new page tables
817 */
818
819 pmap_kernel()->pm_count = 1;
820 pmap_kernel()->pm_apr = sdtpa | kernel_apr;
821
822 DPRINTF(CD_BOOT, ("default apr %08x kernel apr %08lx\n",
823 kernel_apr, sdtpa));
824
825 pmap_bootstrap_cpu(cpu_number());
826 }
827
828 /*
829 * [MD]
830 * Enable address translation on the current processor.
831 */
832 void
833 pmap_bootstrap_cpu(cpuid_t cpu)
834 {
835 /* Load supervisor pointer to segment table. */
836 cmmu_set_sapr(pmap_kernel()->pm_apr);
837 #ifdef PMAPDEBUG
838 printf("cpu%lu: running virtual\n", cpu);
839 #endif
840
841 cmmu_batc_setup(cpu, kernel_apr & CACHE_MASK);
842
843 curcpu()->ci_curpmap = NULL;
844 }
845
846 /*
847 * [MD]
848 * Remove firmware mappings when they are no longer necessary.
849 */
850 void
851 pmap_unmap_firmware()
852 {
853 if (l_firmware != 0) {
854 pmap_kremove(s_firmware, l_firmware);
855 pmap_update(pmap_kernel());
856 }
857 }
858
859 /*
860 * [MI]
861 * Complete the pmap layer initialization, to be able to manage userland
862 * pmaps.
863 */
864 void
865 pmap_init(void)
866 {
867 DPRINTF(CD_INIT, ("pmap_init()\n"));
868 pool_init(&pmappool, sizeof(struct pmap), 0, IPL_NONE, 0,
869 "pmappl", &pool_allocator_single);
870 pool_init(&pvpool, sizeof(pv_entry_t), 0, IPL_VM, 0, "pvpl", NULL);
871 }
872
873 /*
874 * Pmap structure management
875 */
876
877 /*
878 * [MI]
879 * Create a new pmap.
880 */
881 pmap_t
882 pmap_create(void)
883 {
884 pmap_t pmap;
885 struct vm_page *pg;
886 paddr_t pa;
887
888 pmap = pool_get(&pmappool, PR_WAITOK | PR_ZERO);
889
890 /* Allocate the segment table page immediately. */
891 for (;;) {
892 pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
893 if (pg != NULL)
894 break;
895 uvm_wait(__func__);
896 }
897
898 pa = VM_PAGE_TO_PHYS(pg);
899 pmap_cache_ctrl(pa, pa + PAGE_SIZE, pte_cmode);
900
901 pmap->pm_stab = (sdt_entry_t *)pa;
902 pmap->pm_apr = pa | userland_apr;
903 pmap->pm_count = 1;
904
905 DPRINTF(CD_CREAT, ("pmap_create() -> pmap %p, pm_stab %lx\n", pmap, pa));
906
907 return pmap;
908 }
909
910 /*
911 * [MI]
912 * Decrease the pmap reference count, and destroy it when it reaches zero.
913 */
914 void
915 pmap_destroy(pmap_t pmap)
916 {
917 u_int u;
918 sdt_entry_t *sdt;
919 paddr_t pa;
920
921 DPRINTF(CD_DESTR, ("pmap_destroy(%p)\n", pmap));
922 if (--pmap->pm_count == 0) {
923 for (u = SDT_ENTRIES, sdt = pmap->pm_stab; u != 0; sdt++, u--) {
924 if (SDT_VALID(sdt)) {
925 pa = *sdt & PG_FRAME;
926 pmap_cache_ctrl(pa, pa + PAGE_SIZE, CACHE_DFL);
927 uvm_pagefree(PHYS_TO_VM_PAGE(pa));
928 }
929 }
930 pa = (paddr_t)pmap->pm_stab;
931 pmap_cache_ctrl(pa, pa + PAGE_SIZE, CACHE_DFL);
932 uvm_pagefree(PHYS_TO_VM_PAGE(pa));
933 pool_put(&pmappool, pmap);
934 }
935 }
936
937 /*
938 * [MI]
939 * Increase the pmap reference count.
940 */
941 void
942 pmap_reference(pmap_t pmap)
943 {
944 pmap->pm_count++;
945 }
946
947 /*
948 * [MI]
949 * Attempt to regain memory by freeing disposable page tables.
950 */
951 void
952 pmap_collect(pmap_t pmap)
953 {
954 u_int u, v;
955 sdt_entry_t *sdt;
956 pt_entry_t *pte;
957 vaddr_t va;
958 paddr_t pa;
959 int s;
960
961 DPRINTF(CD_COL, ("pmap_collect(%p)\n", pmap));
962
963 s = splvm();
964 for (sdt = pmap->pm_stab, va = 0, u = SDT_ENTRIES; u != 0;
965 sdt++, va += (1 << SDT_SHIFT), u--) {
966 if (!SDT_VALID(sdt))
967 continue;
968 pte = sdt_pte(sdt, 0);
969 for (v = PDT_ENTRIES; v != 0; pte++, v--)
970 if (pmap_pte_w(pte)) /* wired mappings can't go */
971 break;
972 if (v != 0)
973 continue;
974 /* found a suitable pte page to reclaim */
975 pmap_remove_range(pmap, va, va + (1 << SDT_SHIFT));
976
977 pa = *sdt & PG_FRAME;
978 *sdt = SG_NV;
979 pmap_cache_ctrl(pa, pa + PAGE_SIZE, CACHE_DFL);
980 uvm_pagefree(PHYS_TO_VM_PAGE(pa));
981 }
982 splx(s);
983
984 DPRINTF(CD_COL, ("pmap_collect(%p) done\n", pmap));
985 }
986
987 /*
988 * Virtual mapping/unmapping routines
989 */
990
991 /*
992 * [MI]
993 * Establish a `va' to `pa' translation with protection `prot' in `pmap'.
994 * The `flags' argument contains the expected usage protection of the
995 * mapping (and may differ from the currently requested protection), as
996 * well as a possible PMAP_WIRED flag.
997 */
998 int
999 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
1000 {
1001 int s;
1002 pt_entry_t *pte, npte;
1003 paddr_t old_pa;
1004 pv_entry_t pv_e, head;
1005 boolean_t wired = (flags & PMAP_WIRED) != 0;
1006 struct vm_page *pg;
1007
1008 DPRINTF(CD_ENT, ("pmap_enter(%p, %lx, %lx, %x, %x)\n",
1009 pmap, va, pa, prot, flags));
1010
1011 npte = m88k_protection(prot);
1012
1013 /*
1014 * Expand pmap to include this pte.
1015 */
1016 if ((pte = pmap_pte(pmap, va)) == NULL) {
1017 if (pmap == pmap_kernel())
1018 pte = pmap_expand_kmap(va, flags & PMAP_CANFAIL);
1019 else
1020 pte = pmap_expand(pmap, va, flags & PMAP_CANFAIL);
1021
1022 /* will only return NULL if PMAP_CANFAIL is set */
1023 if (pte == NULL) {
1024 DPRINTF(CD_ENT, ("failed (ENOMEM)\n"));
1025 return (ENOMEM);
1026 }
1027 }
1028
1029 /*
1030 * Special case if the physical page is already mapped at this address.
1031 */
1032 old_pa = ptoa(PG_PFNUM(*pte));
1033 DPRINTF(CD_ENT, ("pmap_enter: old_pa %lx pte %x\n", old_pa, *pte));
1034
1035 pg = PHYS_TO_VM_PAGE(pa);
1036 s = splvm();
1037
1038 if (old_pa == pa) {
1039 /* May be changing its wired attributes or protection */
1040 if (wired && !(pmap_pte_w(pte)))
1041 pmap->pm_stats.wired_count++;
1042 else if (!wired && pmap_pte_w(pte))
1043 pmap->pm_stats.wired_count--;
1044 } else {
1045 /* Remove old mapping from the PV list if necessary. */
1046 if (PDT_VALID(pte))
1047 pmap_remove_pte(pmap, va, pte, NULL, FALSE);
1048
1049 if (pg != NULL) {
1050 /*
1051 * Enter the mapping in the PV list for this
1052 * managed page.
1053 */
1054 head = pg_to_pvh(pg);
1055 if (head->pv_pmap == NULL) {
1056 /*
1057 * No mappings yet.
1058 */
1059 head->pv_va = va;
1060 head->pv_pmap = pmap;
1061 head->pv_next = NULL;
1062 pg->mdpage.pv_flags = 0;
1063 } else {
1064 /*
1065 * Add new pv_entry after header.
1066 */
1067 pv_e = pool_get(&pvpool, PR_NOWAIT);
1068 if (pv_e == NULL) {
1069 /* Invalidate the old pte anyway */
1070 tlb_flush(pmap, va, PG_NV);
1071
1072 if (flags & PMAP_CANFAIL) {
1073 splx(s);
1074 return (ENOMEM);
1075 } else
1076 panic("pmap_enter: "
1077 "pvpool exhausted");
1078 }
1079 pv_e->pv_va = va;
1080 pv_e->pv_pmap = pmap;
1081 pv_e->pv_next = head->pv_next;
1082 head->pv_next = pv_e;
1083 }
1084 }
1085
1086 /*
1087 * And count the mapping.
1088 */
1089 pmap->pm_stats.resident_count++;
1090 if (wired)
1091 pmap->pm_stats.wired_count++;
1092 } /* if (pa == old_pa) ... else */
1093
1094 npte |= PG_V;
1095 if (wired)
1096 npte |= PG_W;
1097
1098 if (prot & PROT_WRITE) {
1099 /*
1100 * On 88110, do not mark writable mappings as dirty unless we
1101 * know the page is dirty, or we are using the kernel pmap.
1102 */
1103 if (CPU_IS88110 && pmap != pmap_kernel() &&
1104 pg != NULL && (pg->mdpage.pv_flags & PG_M) == 0)
1105 npte |= PG_U;
1106 else
1107 npte |= PG_M_U;
1108 } else if (prot & PROT_MASK)
1109 npte |= PG_U;
1110
1111 /*
1112 * If outside physical memory, disable cache on this (device) page.
1113 */
1114 if (pa >= last_addr)
1115 npte |= CACHE_INH;
1116
1117 /*
1118 * Invalidate the pte temporarily to avoid the modified bit
1119 * and/or the reference bit being written back by any other
1120 * cpu.
1121 */
1122 npte |= invalidate_pte(pte) & PG_M_U;
1123 npte |= pa;
1124 *pte = npte;
1125 tlb_flush(pmap, va, npte);
1126 DPRINTF(CD_ENT, ("pmap_enter: new pte %x\n", npte));
1127
1128 /*
1129 * Cache attribute flags
1130 */
1131 if (pg != NULL) {
1132 if (flags & PROT_WRITE) {
1133 if (CPU_IS88110 && pmap != pmap_kernel())
1134 pg->mdpage.pv_flags |= PG_U;
1135 else
1136 pg->mdpage.pv_flags |= PG_M_U;
1137 } else if (flags & PROT_MASK)
1138 pg->mdpage.pv_flags |= PG_U;
1139 }
1140
1141 splx(s);
1142
1143 return 0;
1144 }
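/*
 * Illustrative sketch, not part of the original file: how a caller might
 * establish a wired kernel mapping through pmap_enter(), passing the
 * expected access type in the flags together with PMAP_WIRED, and coping
 * with PMAP_CANFAIL.  The function name, the chosen protections and the
 * PMAP_EXAMPLE guard are hypothetical.
 */
#ifdef PMAP_EXAMPLE
static int
pmap_example_wire_kernel_page(vaddr_t va, paddr_t pa)
{
	int error;

	error = pmap_enter(pmap_kernel(), va, pa, PROT_READ | PROT_WRITE,
	    PROT_READ | PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
	if (error == 0)
		pmap_update(pmap_kernel());
	return error;
}
#endif /* PMAP_EXAMPLE */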
1145
1146 /*
1147 * [MI]
1148 * Fast pmap_enter() version for pmap_kernel() and unmanaged pages.
1149 */
1150 void
1151 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
1152 {
1153 pt_entry_t *pte, npte;
1154
1155 DPRINTF(CD_ENT, ("pmap_kenter_pa(%lx, %lx, %x)\n", va, pa, prot));
1156
1157 npte = m88k_protection(prot) | PG_W | PG_V;
1158 #ifdef M88110
1159 if (CPU_IS88110 && m88k_protection(prot) != PG_RO)
1160 npte |= PG_M;
1161 #endif
1162 /*
1163 * If outside physical memory, disable cache on this (device) page.
1164 */
1165 if (pa >= last_addr)
1166 npte |= CACHE_INH;
1167
1168 /*
1169 * Expand pmap to include this pte.
1170 */
1171 if ((pte = pmap_pte(pmap_kernel(), va)) == NULL)
1172 pte = pmap_expand_kmap(va, 0);
1173
1174 /*
1175 * And count the mapping.
1176 */
1177 pmap_kernel()->pm_stats.resident_count++;
1178 pmap_kernel()->pm_stats.wired_count++;
1179
1180 invalidate_pte(pte);
1181 npte |= pa;
1182 *pte = npte;
1183 tlb_kflush(va, npte);
1184 }
1185
1186 /*
1187 * [INTERNAL]
1188 * Remove the mapping at `va' in `pmap', whose pte is pointed to by `pte', and
1189 * update the status of the vm_page matching this translation (if this is
1190 * indeed a managed page). Flush the tlb entry if `flush' is nonzero.
1191 */
1192 void
1193 pmap_remove_pte(pmap_t pmap, vaddr_t va, pt_entry_t *pte, struct vm_page *pg,
1194 boolean_t flush)
1195 {
1196 pt_entry_t opte;
1197 pv_entry_t prev, cur, head;
1198 paddr_t pa;
1199
1200 splassert(IPL_VM);
1201 DPRINTF(CD_RM, ("pmap_remove_pte(%p, %lx, %d)\n", pmap, va, flush));
1202
1203 /*
1204 * Update statistics.
1205 */
1206 pmap->pm_stats.resident_count--;
1207 if (pmap_pte_w(pte))
1208 pmap->pm_stats.wired_count--;
1209
1210 pa = ptoa(PG_PFNUM(*pte));
1211
1212 /*
1213 * Invalidate the pte.
1214 */
1215
1216 opte = invalidate_pte(pte) & PG_M_U;
1217 if (flush)
1218 tlb_flush(pmap, va, PG_NV);
1219
1220 if (pg == NULL) {
1221 pg = PHYS_TO_VM_PAGE(pa);
1222 /* If this isn't a managed page, just return. */
1223 if (pg == NULL)
1224 return;
1225 }
1226
1227 /*
1228 * Remove the mapping from the pvlist for
1229 * this physical page.
1230 */
1231 head = pg_to_pvh(pg);
1232
1233 #ifdef DIAGNOSTIC
1234 if (head->pv_pmap == NULL)
1235 panic("pmap_remove_pte(%p, %p, %p, %p/%p, %d): null pv_list",
1236 pmap, (void *)va, pte, (void *)pa, pg, flush);
1237 #endif
1238
1239 prev = NULL;
1240 for (cur = head; cur != NULL; cur = cur->pv_next) {
1241 if (cur->pv_va == va && cur->pv_pmap == pmap)
1242 break;
1243 prev = cur;
1244 }
1245 if (cur == NULL) {
1246 panic("pmap_remove_pte(%p, %p, %p, %p, %d): mapping for va "
1247 "(pa %p) not in pv list at %p",
1248 pmap, (void *)va, pte, pg, flush, (void *)pa, head);
1249 }
1250
1251 if (prev == NULL) {
1252 /*
1253 * The head pv_entry is embedded in the vm_page. Copy the
1254 * next entry into it and free that entry (the embedded
1255 * head itself can not be freed).
1256 */
1257 cur = cur->pv_next;
1258 if (cur != NULL) {
1259 *head = *cur;
1260 pool_put(&pvpool, cur);
1261 } else {
1262 head->pv_pmap = NULL;
1263 /*
1264 * This page is no longer in use, and is likely
1265 * to be reused soon; since it may still have
1266 * dirty cache lines and may be used for I/O
1267 * (and risk being invalidated by the bus_dma
1268 * code without getting a chance of writeback),
1269 * we make sure the page gets written back.
1270 */
1271 pmap_clean_page(pa);
1272 }
1273 } else {
1274 prev->pv_next = cur->pv_next;
1275 pool_put(&pvpool, cur);
1276 }
1277
1278 /* Update saved attributes for managed page */
1279 pg->mdpage.pv_flags |= opte;
1280 }
1281
1282 /*
1283 * [INTERNAL]
1284 * Removes all mappings within the `sva'..`eva' range in `pmap'.
1285 */
1286 void
1287 pmap_remove_range(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1288 {
1289 vaddr_t va, eseg;
1290 pt_entry_t *pte;
1291
1292 DPRINTF(CD_RM, ("pmap_remove_range(%p, %lx, %lx)\n", pmap, sva, eva));
1293
1294 /*
1295 * Loop through the range in PAGE_SIZE increments.
1296 */
1297 va = sva;
1298 while (va != eva) {
1299 sdt_entry_t *sdt;
1300
1301 eseg = (va & SDT_MASK) + (1 << SDT_SHIFT);
1302 if (eseg > eva || eseg == 0)
1303 eseg = eva;
1304
1305 sdt = SDTENT(pmap, va);
1306 /* If no segment table, skip a whole segment */
1307 if (!SDT_VALID(sdt))
1308 va = eseg;
1309 else {
1310 pte = sdt_pte(sdt, va);
1311 while (va != eseg) {
1312 if (PDT_VALID(pte))
1313 pmap_remove_pte(pmap, va, pte, NULL,
1314 TRUE);
1315 va += PAGE_SIZE;
1316 pte++;
1317 }
1318 }
1319 }
1320 }
1321
1322 /*
1323 * [MI]
1324 * Removes all mappings within the `sva'..`eva' range in `pmap'.
1325 */
1326 void
1327 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1328 {
1329 int s;
1330
1331 KERNEL_LOCK();
1332 s = splvm();
1333 pmap_remove_range(pmap, sva, eva);
1334 splx(s);
1335 KERNEL_UNLOCK();
1336 }
1337
1338 /*
1339 * [MI]
1340 * Fast pmap_remove() version for pmap_kernel() and unmanaged pages.
1341 */
1342 void
1343 pmap_kremove(vaddr_t va, vsize_t len)
1344 {
1345 vaddr_t e, eseg;
1346
1347 DPRINTF(CD_RM, ("pmap_kremove(%lx, %lx)\n", va, len));
1348
1349 e = va + len;
1350 while (va != e) {
1351 sdt_entry_t *sdt;
1352 pt_entry_t *pte, opte;
1353
1354 eseg = (va & SDT_MASK) + (1 << SDT_SHIFT);
1355 if (eseg > e || eseg == 0)
1356 eseg = e;
1357
1358 sdt = SDTENT(pmap_kernel(), va);
1359
1360 /* If no segment table, skip a whole segment */
1361 if (!SDT_VALID(sdt))
1362 va = eseg;
1363 else {
1364 pte = sdt_pte(sdt, va);
1365 while (va != eseg) {
1366 if (PDT_VALID(pte)) {
1367 /* Update the counts */
1368 pmap_kernel()->pm_stats.resident_count--;
1369 pmap_kernel()->pm_stats.wired_count--;
1370
1371 opte = invalidate_pte(pte);
1372 tlb_kflush(va, PG_NV);
1373
1374 /*
1375 * Make sure the page is written back
1376 * if it was cached.
1377 */
1378 if ((opte & (CACHE_INH | CACHE_WT)) ==
1379 0)
1380 pmap_clean_page(
1381 ptoa(PG_PFNUM(opte)));
1382 }
1383 va += PAGE_SIZE;
1384 pte++;
1385 }
1386 }
1387 }
1388 }
1389
1390 /*
1391 * [INTERNAL]
1392 * Removes all mappings of managed page `pg'.
1393 */
1394 void
1395 pmap_remove_page(struct vm_page *pg)
1396 {
1397 pt_entry_t *pte;
1398 pv_entry_t head, pvep;
1399 vaddr_t va;
1400 pmap_t pmap;
1401 int s;
1402
1403 DPRINTF(CD_RMPG, ("pmap_remove_page(%p)\n", pg));
1404
1405 s = splvm();
1406 /*
1407 * Walk down PV list, removing all mappings.
1408 */
1409 pvep = head = pg_to_pvh(pg);
1410 while (pvep != NULL && (pmap = pvep->pv_pmap) != NULL) {
1411 va = pvep->pv_va;
1412 pte = pmap_pte(pmap, va);
1413
1414 if (pte == NULL || !PDT_VALID(pte)) {
1415 pvep = pvep->pv_next;
1416 continue; /* no page mapping */
1417 }
1418
1419 pmap_remove_pte(pmap, va, pte, pg, TRUE);
1420 pvep = head;
1421 /*
1422 * Do not free any empty page tables,
1423 * leave that for when VM calls pmap_collect().
1424 */
1425 }
1426 splx(s);
1427 }
1428
1429 /*
1430 * [MI]
1431 * Strengthens the protection of the `sva'..`eva' range within `pmap' to `prot'.
1432 */
1433 void
1434 pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1435 {
1436 int s;
1437 pt_entry_t *pte, ap, opte, npte;
1438 vaddr_t va, eseg;
1439
1440 if ((prot & PROT_READ) == 0) {
1441 pmap_remove(pmap, sva, eva);
1442 return;
1443 }
1444
1445 ap = m88k_protection(prot);
1446
1447 s = splvm();
1448 /*
1449 * Loop through the range in PAGE_SIZE increments.
1450 */
1451 va = sva;
1452 while (va != eva) {
1453 sdt_entry_t *sdt;
1454
1455 eseg = (va & SDT_MASK) + (1 << SDT_SHIFT);
1456 if (eseg > eva || eseg == 0)
1457 eseg = eva;
1458
1459 sdt = SDTENT(pmap, va);
1460 /* If no segment table, skip a whole segment */
1461 if (!SDT_VALID(sdt))
1462 va = eseg;
1463 else {
1464 pte = sdt_pte(sdt, va);
1465 while (va != eseg) {
1466 if (PDT_VALID(pte)) {
1467 /*
1468 * Invalidate pte temporarily to avoid
1469 * the modified bit and/or the
1470 * reference bit being written back by
1471 * any other cpu.
1472 */
1473 opte = invalidate_pte(pte);
1474 npte = ap | (opte & ~PG_PROT);
1475 *pte = npte;
1476 tlb_flush(pmap, va, npte);
1477 }
1478 va += PAGE_SIZE;
1479 pte++;
1480 }
1481 }
1482 }
1483 splx(s);
1484 }
1485
1486 /*
1487 * [MI]
1488 * Removes the wired state of the page at `va' in `pmap'.
1489 */
1490 void
1491 pmap_unwire(pmap_t pmap, vaddr_t va)
1492 {
1493 pt_entry_t *pte;
1494
1495 pte = pmap_pte(pmap, va);
1496 if (pmap_pte_w(pte)) {
1497 pmap->pm_stats.wired_count--;
1498 *pte &= ~PG_W;
1499 }
1500 }
1501
1502 /*
1503 * vm_page management routines
1504 */
1505
1506 /*
1507 * [MI]
1508 * Copies vm_page `srcpg' to `dstpg'.
1509 */
1510 void
1511 pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg)
1512 {
1513 paddr_t src = VM_PAGE_TO_PHYS(srcpg);
1514 paddr_t dst = VM_PAGE_TO_PHYS(dstpg);
1515
1516 DPRINTF(CD_COPY, ("pmap_copy_page(%p,%p) pa %lx %lx\n",
1517 srcpg, dstpg, src, dst));
1518 curcpu()->ci_copypage((vaddr_t)src, (vaddr_t)dst);
1519
1520 if (KERNEL_APR_CMODE == CACHE_DFL)
1521 cmmu_dcache_wb(cpu_number(), dst, PAGE_SIZE);
1522 }
1523
1524 /*
1525 * [MI]
1526 * Clears vm_page `pg'.
1527 */
1528 void
1529 pmap_zero_page(struct vm_page *pg)
1530 {
1531 paddr_t pa = VM_PAGE_TO_PHYS(pg);
1532
1533 DPRINTF(CD_ZERO, ("pmap_zero_page(%p) pa %lx\n", pg, pa));
1534 curcpu()->ci_zeropage((vaddr_t)pa);
1535
1536 if (KERNEL_APR_CMODE == CACHE_DFL)
1537 cmmu_dcache_wb(cpu_number(), pa, PAGE_SIZE);
1538 }
1539
1540 /*
1541 * [INTERNAL]
1542 * Alters bits in the pte of all mappings of `pg'. For each pte, bits in
1543 * `set' are set and bits not in `mask' are cleared. The flags summary
1544 * at the head of the pv list is modified in a similar way.
1545 */
1546 void
1547 pmap_changebit(struct vm_page *pg, int set, int mask)
1548 {
1549 pv_entry_t head, pvep;
1550 pt_entry_t *pte, npte, opte;
1551 pmap_t pmap;
1552 int s;
1553 vaddr_t va;
1554
1555 DPRINTF(CD_CBIT, ("pmap_changebit(%p, %x, %x)\n", pg, set, mask));
1556
1557 s = splvm();
1558
1559 /*
1560 * Clear saved attributes (modify, reference)
1561 */
1562 pg->mdpage.pv_flags &= mask;
1563
1564 head = pg_to_pvh(pg);
1565 if (head->pv_pmap != NULL) {
1566 /* for each listed pmap, update the affected bits */
1567 for (pvep = head; pvep != NULL; pvep = pvep->pv_next) {
1568 pmap = pvep->pv_pmap;
1569 va = pvep->pv_va;
1570 pte = pmap_pte(pmap, va);
1571
1572 /*
1573 * Check for existing and valid pte
1574 */
1575 if (pte == NULL || !PDT_VALID(pte))
1576 continue; /* no page mapping */
1577 #ifdef PMAPDEBUG
1578 if (ptoa(PG_PFNUM(*pte)) != VM_PAGE_TO_PHYS(pg))
1579 panic("pmap_changebit: pte %08x in pmap %p doesn't point to page %p@%lx",
1580 *pte, pmap, pg, VM_PAGE_TO_PHYS(pg));
1581 #endif
1582
1583 /*
1584 * Update bits
1585 */
1586 opte = *pte;
1587 npte = (opte | set) & mask;
1588
1589 /*
1590 * Invalidate pte temporarily to avoid the modified bit
1591 * and/or the reference being written back by any other
1592 * cpu.
1593 */
1594 if (npte != opte) {
1595 invalidate_pte(pte);
1596 *pte = npte;
1597 tlb_flush(pmap, va, npte);
1598 }
1599 }
1600 }
1601
1602 splx(s);
1603 }
1604
1605 /*
1606 * [INTERNAL]
1607 * Checks for `bit' being set in at least one pte of all mappings of `pg'.
1608 * The flags summary at the head of the pv list is checked first, and will
1609 * be set if it wasn't but the bit is found set in one pte.
1610 * Returns TRUE if the bit is found, FALSE if not.
1611 */
1612 boolean_t
1613 pmap_testbit(struct vm_page *pg, int bit)
1614 {
1615 pv_entry_t head, pvep;
1616 pt_entry_t *pte;
1617 pmap_t pmap;
1618 int s;
1619
1620 DPRINTF(CD_TBIT, ("pmap_testbit(%p, %x): ", pg, bit));
1621
1622 s = splvm();
1623
1624 if (pg->mdpage.pv_flags & bit) {
1625 /* we've already cached this flag for this page,
1626 no use looking further... */
1627 DPRINTF(CD_TBIT, ("cached\n"));
1628 splx(s);
1629 return (TRUE);
1630 }
1631
1632 head = pg_to_pvh(pg);
1633 if (head->pv_pmap != NULL) {
1634 /* for each listed pmap, check modified bit for given page */
1635 for (pvep = head; pvep != NULL; pvep = pvep->pv_next) {
1636 pmap = pvep->pv_pmap;
1637
1638 pte = pmap_pte(pmap, pvep->pv_va);
1639 if (pte == NULL || !PDT_VALID(pte))
1640 continue;
1641
1642 #ifdef PMAPDEBUG
1643 if (ptoa(PG_PFNUM(*pte)) != VM_PAGE_TO_PHYS(pg))
1644 panic("pmap_testbit: pte %08x in pmap %p doesn't point to page %p@%lx",
1645 *pte, pmap, pg, VM_PAGE_TO_PHYS(pg));
1646 #endif
1647
1648 if ((*pte & bit) != 0) {
1649 pg->mdpage.pv_flags |= bit;
1650 DPRINTF(CD_TBIT, ("found\n"));
1651 splx(s);
1652 return (TRUE);
1653 }
1654 }
1655 }
1656
1657 DPRINTF(CD_TBIT, ("not found\n"));
1658 splx(s);
1659 return (FALSE);
1660 }
1661
1662 /*
1663 * [INTERNAL]
1664 * Clears `bit' in the pte of all mappings of `pg', as well as in the flags
1665 * summary at the head of the pv list.
1666 * Returns TRUE if the bit was found set in either a mapping or the summary,
1667 * FALSE if not.
1668 */
1669 boolean_t
1670 pmap_unsetbit(struct vm_page *pg, int bit)
1671 {
1672 boolean_t rv = FALSE;
1673 pv_entry_t head, pvep;
1674 pt_entry_t *pte, opte, npte;
1675 pmap_t pmap;
1676 int s;
1677 vaddr_t va;
1678
1679 DPRINTF(CD_USBIT, ("pmap_unsetbit(%p, %x): ", pg, bit));
1680
1681 s = splvm();
1682
1683 /*
1684 * Clear saved attributes
1685 */
1686 if (pg->mdpage.pv_flags & bit) {
1687 pg->mdpage.pv_flags ^= bit;
1688 rv = TRUE;
1689 }
1690
1691 head = pg_to_pvh(pg);
1692 if (head->pv_pmap != NULL) {
1693 /* for each listed pmap, update the specified bit */
1694 for (pvep = head; pvep != NULL; pvep = pvep->pv_next) {
1695 pmap = pvep->pv_pmap;
1696 va = pvep->pv_va;
1697 pte = pmap_pte(pmap, va);
1698
1699 /*
1700 * Check for existing and valid pte
1701 */
1702 if (pte == NULL || !PDT_VALID(pte))
1703 continue; /* no page mapping */
1704 #ifdef PMAPDEBUG
1705 if (ptoa(PG_PFNUM(*pte)) != VM_PAGE_TO_PHYS(pg))
1706 panic("pmap_unsetbit: pte %08x in pmap %p doesn't point to page %p@%lx",
1707 *pte, pmap, pg, VM_PAGE_TO_PHYS(pg));
1708 #endif
1709
1710 /*
1711 * Update bits
1712 */
1713 opte = *pte;
1714 if (opte & bit) {
1715 /*
1716 * Invalidate pte temporarily to avoid the
1717 * specified bit being written back by any
1718 * other cpu.
1719 */
1720 invalidate_pte(pte);
1721 npte = opte ^ bit;
1722 *pte = npte;
1723 tlb_flush(pmap, va, npte);
1724 rv = TRUE;
1725 }
1726 }
1727 }
1728 splx(s);
1729
1730 DPRINTF(CD_USBIT, (rv ? "TRUE\n" : "FALSE\n"));
1731 return (rv);
1732 }
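/*
 * Illustrative sketch, not part of the original file: the MI page attribute
 * hooks are thin wrappers around pmap_testbit()/pmap_unsetbit().  On this
 * port they are expected to be provided by the machine-dependent pmap.h;
 * the functions below only show the intended relationship and are not
 * claimed to match that header.  The names and the PMAP_EXAMPLE guard are
 * hypothetical.
 */
#ifdef PMAP_EXAMPLE
static boolean_t
pmap_example_clear_modify(struct vm_page *pg)
{
	return pmap_unsetbit(pg, PG_M);		/* TRUE if the page was dirty */
}

static boolean_t
pmap_example_clear_reference(struct vm_page *pg)
{
	return pmap_unsetbit(pg, PG_U);		/* TRUE if it was referenced */
}
#endif /* PMAP_EXAMPLE */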
1733
1734 /*
1735 * [MI]
1736 * Checks whether `pg' is dirty.
1737 * Returns TRUE if there is at least one mapping of `pg' with the modified
1738 * bit set in its pte, FALSE if not.
1739 */
1740 boolean_t
1741 pmap_is_modified(struct vm_page *pg)
1742 {
1743 #ifdef M88110
1744 /*
1745 * Since on 88110 PG_M bit tracking is done in software, we can
1746 * trust the page flags without having to walk the individual
1747 * ptes in case the page flags are behind actual usage.
1748 */
1749 if (CPU_IS88110) {
1750 boolean_t rc = FALSE;
1751
1752 if (pg->mdpage.pv_flags & PG_M)
1753 rc = TRUE;
1754 DPRINTF(CD_TBIT, ("pmap_is_modified(%p) -> %x\n", pg, rc));
1755 return (rc);
1756 }
1757 #endif
1758
1759 return pmap_testbit(pg, PG_M);
1760 }
1761
1762 /*
1763 * [MI]
1764 * Checks whether `pg' is in use.
1765 * Returns TRUE if there is at least one mapping of `pg' with the used bit
1766 * set in its pte, FALSE if not.
1767 */
1768 boolean_t
1769 pmap_is_referenced(struct vm_page *pg)
1770 {
1771 return pmap_testbit(pg, PG_U);
1772 }
1773
1774 /*
1775 * [MI]
1776 * Strengthens protection of `pg' to `prot'.
1777 */
1778 void
1779 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
1780 {
1781 if ((prot & PROT_READ) == PROT_NONE)
1782 pmap_remove_page(pg);
1783 else if ((prot & PROT_WRITE) == PROT_NONE)
1784 pmap_changebit(pg, PG_RO, ~0);
1785 }
1786
1787 /*
1788 * Miscellaneous routines
1789 */
1790
1791 /*
1792 * [INTERNAL]
1793 * Writeback the data cache for the given page, on all processors.
1794 */
1795 void
1796 pmap_clean_page(paddr_t pa)
1797 {
1798 struct cpu_info *ci;
1799 #ifdef MULTIPROCESSOR
1800 CPU_INFO_ITERATOR cpu;
1801 #endif
1802
1803 if (KERNEL_APR_CMODE != CACHE_DFL && USERLAND_APR_CMODE != CACHE_DFL)
1804 return;
1805
1806 #ifdef MULTIPROCESSOR
1807 CPU_INFO_FOREACH(cpu, ci)
1808 #else
1809 ci = curcpu();
1810 #endif
1811 /* CPU_INFO_FOREACH(cpu, ci) */
1812 cmmu_dcache_wb(ci->ci_cpuid, pa, PAGE_SIZE);
1813 }
1814
1815 /*
1816 * [MI]
1817 * Flushes the instruction cache for the range `va'..`va'+`len' in process `pr'.
1818 */
1819 void
1820 pmap_proc_iflush(struct process *pr, vaddr_t va, vsize_t len)
1821 {
1822 pmap_t pmap = vm_map_pmap(&pr->ps_vmspace->vm_map);
1823 paddr_t pa;
1824 vsize_t count;
1825 struct cpu_info *ci;
1826
1827 if (KERNEL_APR_CMODE != CACHE_DFL && USERLAND_APR_CMODE != CACHE_DFL)
1828 return;
1829
1830 while (len != 0) {
1831 count = min(len, PAGE_SIZE - (va & PAGE_MASK));
1832 if (pmap_extract(pmap, va, &pa)) {
1833 #ifdef MULTIPROCESSOR
1834 CPU_INFO_ITERATOR cpu;
1835
1836 CPU_INFO_FOREACH(cpu, ci)
1837 #else
1838 ci = curcpu();
1839 #endif
1840 /* CPU_INFO_FOREACH(cpu, ci) */ {
1841 cmmu_dcache_wb(ci->ci_cpuid, pa, count);
1842 /* XXX this should not be necessary, */
1843 /* XXX I$ is configured to snoop D$ */
1844 cmmu_icache_inv(ci->ci_cpuid, pa, count);
1845 }
1846 }
1847 va += count;
1848 len -= count;
1849 }
1850 }
1851
1852 #ifdef M88110
1853 /*
1854 * [INTERNAL]
1855 * Updates the pte mapping `va' in `pmap' upon write fault, to set the
1856 * modified bit in the pte (the 88110 MMU doesn't do this and relies upon
1857 * the kernel to achieve this).
1858 * Returns TRUE if the page was indeed writeable but not marked as dirty,
1859 * FALSE if this is a genuine write fault.
1860 */
1861 int
1862 pmap_set_modify(pmap_t pmap, vaddr_t va)
1863 {
1864 pt_entry_t *pte;
1865 paddr_t pa;
1866 vm_page_t pg;
1867
1868 pte = pmap_pte(pmap, va);
1869 #ifdef DEBUG
1870 if (pte == NULL)
1871 panic("NULL pte on write fault??");
1872 #endif
1873
1874 /* Not a first write to a writable page */
1875 if ((*pte & (PG_M | PG_RO)) != 0)
1876 return (FALSE);
1877
1878 /* Mark the page as dirty */
1879 *pte |= PG_M;
1880 pa = *pte & PG_FRAME;
1881 pg = PHYS_TO_VM_PAGE(pa);
1882 #ifdef DIAGNOSTIC
1883 if (pg == NULL)
1884 panic("Write fault to unmanaged page %p", (void *)pa);
1885 #endif
1886
1887 pg->mdpage.pv_flags |= PG_M_U;
1888
1889 tlb_flush(pmap, va, *pte);
1890
1891 return (TRUE);
1892 }
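/*
 * Illustrative sketch, not part of the original file: how a write fault
 * handler is expected to use pmap_set_modify() - give the pmap a chance to
 * record the modification first, and only treat the fault as a genuine
 * protection violation (to be resolved by uvm) when it returns FALSE.
 * The function name and the PMAP_EXAMPLE guard are hypothetical.
 */
#ifdef PMAP_EXAMPLE
static boolean_t
pmap_example_write_fault(pmap_t pmap, vaddr_t va)
{
	if (pmap_set_modify(pmap, va))
		return TRUE;	/* pte fixed up, just retry the access */
	return FALSE;		/* real protection fault, hand it to uvm */
}
#endif /* PMAP_EXAMPLE */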
1893 #endif
1894
1895 /*
1896 * [MD PUBLIC]
1897 * Change the cache control bits of the address range `sva'..`eva' in
1898 * pmap_kernel to `mode'.
1899 */
1900 void
1901 pmap_cache_ctrl(vaddr_t sva, vaddr_t eva, u_int mode)
1902 {
1903 int s;
1904 pt_entry_t *pte, opte, npte;
1905 vaddr_t va;
1906 paddr_t pa;
1907 cpuid_t cpu;
1908
1909 DPRINTF(CD_CACHE, ("pmap_cache_ctrl(%lx, %lx, %x)\n",
1910 sva, eva, mode));
1911
1912 s = splvm();
1913 for (va = sva; va != eva; va += PAGE_SIZE) {
1914 if ((pte = pmap_pte(pmap_kernel(), va)) == NULL)
1915 continue;
1916 DPRINTF(CD_CACHE, ("cache_ctrl: pte@%p\n", pte));
1917
1918 /*
1919 * Data cache should be copied back and invalidated if
1920 * the old mapping was cached and the new isn't, or if
1921 * we are downgrading from writeback to writethrough.
1922 */
1923 if (((*pte & CACHE_INH) == 0 && (mode & CACHE_INH) != 0) ||
1924 ((*pte & CACHE_WT) == 0 && (mode & CACHE_WT) != 0)) {
1925 pa = ptoa(PG_PFNUM(*pte));
1926 #ifdef MULTIPROCESSOR
1927 for (cpu = 0; cpu < MAX_CPUS; cpu++)
1928 if (ISSET(m88k_cpus[cpu].ci_flags, CIF_ALIVE)) {
1929 #else
1930 cpu = cpu_number();
1931 #endif
1932 if (mode & CACHE_INH)
1933 cmmu_cache_wbinv(cpu,
1934 pa, PAGE_SIZE);
1935 else if (KERNEL_APR_CMODE == CACHE_DFL ||
1936 USERLAND_APR_CMODE == CACHE_DFL)
1937 cmmu_dcache_wb(cpu,
1938 pa, PAGE_SIZE);
1939 #ifdef MULTIPROCESSOR
1940 }
1941 #endif
1942 }
1943
1944 /*
1945 * Invalidate the pte temporarily to avoid the modified bit and/or
1946 * the reference bit being written back by any other cpu.
1947 */
1948
1949 opte = invalidate_pte(pte);
1950 npte = (opte & ~CACHE_MASK) | mode;
1951 *pte = npte;
1952 tlb_kflush(va, npte);
1953 }
1954 splx(s);
1955 }
1956
1957 /*
1958 * [MD PUBLIC]
1959 * Change the cache control bits of all mappings of the given physical page to
1960 * disable cached accesses.
1961 */
1962 void
1963 pmap_page_uncache(paddr_t pa)
1964 {
1965 struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
1966 struct pmap *pmap;
1967 pv_entry_t head, pvep;
1968 pt_entry_t *pte, opte, npte;
1969 vaddr_t va;
1970 int s;
1971
1972 s = splvm();
1973 head = pg_to_pvh(pg);
1974 if (head->pv_pmap != NULL) {
1975 for (pvep = head; pvep != NULL; pvep = pvep->pv_next) {
1976 pmap = pvep->pv_pmap;
1977 va = pvep->pv_va;
1978 pte = pmap_pte(pmap, va);
1979
1980 if (pte == NULL || !PDT_VALID(pte))
1981 continue; /* no page mapping */
1982 opte = *pte;
1983 if ((opte & CACHE_MASK) != CACHE_INH) {
1984 /*
1985 * Skip the direct mapping; it will be changed
1986 * by the pmap_cache_ctrl() call below.
1987 */
1988 if (pmap == pmap_kernel() && va == pa)
1989 continue;
1990 /*
1991 * Invalidate pte temporarily to avoid the
1992 * specified bit being written back by any
1993 * other cpu.
1994 */
1995 invalidate_pte(pte);
1996 npte = (opte & ~CACHE_MASK) | CACHE_INH;
1997 *pte = npte;
1998 tlb_flush(pmap, va, npte);
1999 }
2000 }
2001 }
2002 splx(s);
2003 pmap_cache_ctrl(pa, pa + PAGE_SIZE, CACHE_INH);
2004 }
2005
2006 /*
2007 * [MI]
2008 * Marks a "direct" page as unused.
2009 */
2010 vm_page_t
2011 pmap_unmap_direct(vaddr_t va)
2012 {
2013 paddr_t pa = (paddr_t)va;
2014 vm_page_t pg = PHYS_TO_VM_PAGE(pa);
2015
2016 pmap_clean_page(pa);
2017
2018 return pg;
2019 }
2020