/*	$NetBSD: pmap_machdep.c,v 1.1 2016/07/11 16:15:36 matt Exp $	*/

/*-
 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center and by Chris G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.c	8.4 (Berkeley) 1/26/94
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: pmap_machdep.c,v 1.1 2016/07/11 16:15:36 matt Exp $");

/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidation or reduced-protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */

/* XXX simonb 2002/02/26
 *
 * MIPS3_PLUS is used to conditionally compile the r4k MMU support.
 * This is bogus - for example, some IDT MIPS-II CPUs have r4k style
 * MMUs (and 32-bit ones at that).
 *
 * On the other hand, it's not likely that we'll ever support the R6000
 * (is it?), so maybe that can be an "if MIPS2 or greater" check.
 *
 * Also along these lines are using totally separate functions for
 * r3k-style and r4k-style MMUs and removing all the MIPS_HAS_R4K_MMU
 * checks in the current functions.
 *
 * These warnings probably apply to other files under sys/arch/mips.
 */

#include "opt_sysv.h"
#include "opt_cputype.h"
#include "opt_multiprocessor.h"
#include "opt_mips_cache.h"

#define __MUTEX_PRIVATE
#define __PMAP_PRIVATE

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/systm.h>
#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#include <uvm/uvm.h>

#include <mips/cache.h>
#include <mips/cpuregs.h>
#include <mips/locore.h>
#include <mips/pte.h>

CTASSERT(MIPS_KSEG0_START < 0);
CTASSERT((intptr_t)MIPS_PHYS_TO_KSEG0(0x1000) < 0);
CTASSERT(MIPS_KSEG1_START < 0);
CTASSERT((intptr_t)MIPS_PHYS_TO_KSEG1(0x1000) < 0);
CTASSERT(MIPS_KSEG2_START < 0);
CTASSERT(MIPS_MAX_MEM_ADDR < 0);
CTASSERT(MIPS_RESERVED_ADDR < 0);
CTASSERT((uint32_t)MIPS_KSEG0_START == 0x80000000);
CTASSERT((uint32_t)MIPS_KSEG1_START == 0xa0000000);
CTASSERT((uint32_t)MIPS_KSEG2_START == 0xc0000000);
CTASSERT((uint32_t)MIPS_MAX_MEM_ADDR == 0xbe000000);
CTASSERT((uint32_t)MIPS_RESERVED_ADDR == 0xbfc80000);
CTASSERT(MIPS_KSEG0_P(MIPS_PHYS_TO_KSEG0(0)));
CTASSERT(MIPS_KSEG1_P(MIPS_PHYS_TO_KSEG1(0)));
#ifdef _LP64
CTASSERT(VM_MIN_KERNEL_ADDRESS % NBXSEG == 0);
#else
CTASSERT(VM_MIN_KERNEL_ADDRESS % NBSEG == 0);
#endif

//PMAP_COUNTER(idlezeroed_pages, "pages idle zeroed");
PMAP_COUNTER(zeroed_pages, "pages zeroed");
PMAP_COUNTER(copied_pages, "pages copied");
extern struct evcnt pmap_evcnt_page_cache_evictions;

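/*
 * Map a physical page at a temporary kernel virtual address so its contents
 * can be accessed (e.g. for zeroing or copying).  If the page can be reached
 * through KSEG0/XKPHYS without creating a bad cache alias, that direct-mapped
 * address is used; otherwise a per-CPU ephemeral VA with a compatible cache
 * index is mapped via a temporary PTE and TLB entry, and the previous PTE is
 * saved in *old_pte_p for pmap_md_unmap_ephemeral_page().
 */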
static register_t
pmap_md_map_ephemeral_page(struct vm_page *pg, bool locked_p, int prot,
    pt_entry_t *old_pte_p)
{
	const paddr_t pa = VM_PAGE_TO_PHYS(pg);
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
	pv_entry_t pv = &mdpg->mdpg_first;
	register_t va = 0;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pg=%p, prot=%d, ptep=%p)",
	    pg, prot, old_pte_p, 0);

	KASSERT(locked_p || !VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
	KASSERT(!locked_p || VM_PAGEMD_PVLIST_LOCKED_P(mdpg));

	if (!MIPS_CACHE_VIRTUAL_ALIAS || !mips_cache_badalias(pv->pv_va, pa)) {
#ifndef __mips_o32
		va = MIPS_PHYS_TO_XKPHYS_CACHED(pa);
#else
		if (pa < MIPS_PHYS_MASK) {
			va = MIPS_PHYS_TO_KSEG0(pa);
		}
#endif
	}
	if (va == 0) {
		/*
		 * Make sure to use a congruent mapping to the last mapped
		 * address so we don't have to worry about virtual aliases.
		 */
		kpreempt_disable(); // paired with the one in unmap
		struct cpu_info * const ci = curcpu();
		KASSERT(ci->ci_pmap_dstbase != 0);

		va = (prot & VM_PROT_WRITE
			? ci->ci_pmap_dstbase
			: ci->ci_pmap_srcbase)
		    + mips_cache_indexof(MIPS_CACHE_VIRTUAL_ALIAS
			? pv->pv_va
			: pa);

		/*
		 * Now to make and write the new PTE to map the PA.
		 */
		const pt_entry_t npte = pte_make_kenter_pa(pa, mdpg, prot, 0);
		pt_entry_t * const ptep = pmap_pte_lookup(pmap_kernel(), va);
		*old_pte_p = *ptep;		// save
		bool rv __diagused;
		*ptep = npte;			// update page table

		// update the TLB directly making sure we force the new entry
		// into it.
		rv = tlb_update_addr(va, KERNEL_PID, npte, true);
		KASSERTMSG(rv == 1, "va %#"PRIxREGISTER" pte=%#"PRIxPTE" rv=%d",
		    va, pte_value(npte), rv);
	}
	if (MIPS_CACHE_VIRTUAL_ALIAS) {
		/*
		 * If we are forced to use an incompatible alias, flush the
		 * page from the cache so we will copy the correct contents.
		 */
		if (!locked_p)
			(void)VM_PAGEMD_PVLIST_READLOCK(mdpg);
		if (VM_PAGEMD_CACHED_P(mdpg)
		    && mips_cache_badalias(pv->pv_va, va)) {
			mips_dcache_wbinv_range_index(trunc_page(pv->pv_va),
			    PAGE_SIZE);
			/*
			 * If there is no active mapping, remember this new one.
			 */
			if (pv->pv_pmap == NULL)
				pv->pv_va = va;
		}
		if (!locked_p)
			VM_PAGEMD_PVLIST_UNLOCK(mdpg);
	}

	UVMHIST_LOG(pmaphist, " <-- done (va=%#lx)", va, 0, 0, 0);

	return va;
}

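/*
 * Tear down a mapping established by pmap_md_map_ephemeral_page().  Flush
 * the page from the data cache if it was uncached or mapped at a bad alias,
 * and, if a temporary PTE was used, restore the saved PTE and fix up the TLB.
 */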
static void
pmap_md_unmap_ephemeral_page(struct vm_page *pg, bool locked_p, register_t va,
	pt_entry_t old_pte)
{
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
	pv_entry_t pv = &mdpg->mdpg_first;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pg=%p, va=%#lx, pte=%#"PRIxPTE")",
	    pg, va, pte_value(old_pte), 0);

	KASSERT(locked_p || !VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
	KASSERT(!locked_p || VM_PAGEMD_PVLIST_LOCKED_P(mdpg));

	if (MIPS_CACHE_VIRTUAL_ALIAS) {
		if (!locked_p)
			(void)VM_PAGEMD_PVLIST_READLOCK(mdpg);
		/*
		 * If this page was previously uncached or we had to use an
		 * incompatible alias, flush it from the cache.
		 */
		if (VM_PAGEMD_UNCACHED_P(mdpg)
		    || (pv->pv_pmap != NULL
			&& mips_cache_badalias(pv->pv_va, va))) {
			mips_dcache_wbinv_range(va, PAGE_SIZE);
		}
		if (!locked_p)
			VM_PAGEMD_PVLIST_UNLOCK(mdpg);
	}
	/*
	 * If we had to map using a page table entry, restore it now.
	 */
	if (!pmap_md_direct_mapped_vaddr_p(va)) {
		*pmap_pte_lookup(pmap_kernel(), va) = old_pte;
		if (pte_valid_p(old_pte)) {
			// Update the TLB with the old mapping.
			tlb_update_addr(va, KERNEL_PID, old_pte, 0);
		} else {
			// Invalidate TLB entry if the old pte wasn't valid.
			tlb_invalidate_addr(va, KERNEL_PID);
		}
		kpreempt_enable();	// Restore preemption
	}
	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
}

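/*
 * Write back and invalidate a page from the data cache using an ephemeral
 * mapping, so that all dirty cache lines for the page reach memory.
 */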
static void
pmap_md_vca_page_wbinv(struct vm_page *pg, bool locked_p)
{
	pt_entry_t pte;

	const register_t va = pmap_md_map_ephemeral_page(pg, locked_p,
	    VM_PROT_READ, &pte);

	mips_dcache_wbinv_range(va, PAGE_SIZE);

	pmap_md_unmap_ephemeral_page(pg, locked_p, va, pte);
}

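/*
 * Decide whether it is safe to steal npgs pages from the given physical
 * segment at boot time.  On 32-bit (non-_LP64) kernels the pages must be
 * reachable through KSEG0, otherwise pmap cannot access them directly.
 */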
bool
pmap_md_ok_to_steal_p(const struct vm_physseg *seg, size_t npgs)
{
#ifndef _LP64
	if (seg->avail_start + npgs >= atop(MIPS_PHYS_MASK + 1)) {
		aprint_debug("%s: seg %zu: not enough in KSEG0 for %zu pages\n",
		    __func__, seg - VM_PHYSMEM_PTR(0), npgs);
		return false;
	}
#endif
	return true;
}

/*
 *	Bootstrap the system enough to run with virtual memory.
 *	firstaddr is the first unused kseg0 address (not page aligned).
 */
void
pmap_bootstrap(void)
{
	vsize_t bufsz;
	size_t sysmap_size;
	pt_entry_t *sysmap;

	if (MIPS_CACHE_VIRTUAL_ALIAS && uvmexp.ncolors)
		pmap_page_colormask = (uvmexp.ncolors - 1) << PAGE_SHIFT;

#ifdef MULTIPROCESSOR
	pmap_t pm = pmap_kernel();
	kcpuset_create(&pm->pm_onproc, true);
	kcpuset_create(&pm->pm_active, true);
	KASSERT(pm->pm_onproc != NULL);
	KASSERT(pm->pm_active != NULL);
	kcpuset_set(pm->pm_onproc, cpu_number());
	kcpuset_set(pm->pm_active, cpu_number());
#endif
	pmap_tlb_info_init(&pmap_tlb0_info);		/* init the lock */

	/*
	 * Compute the number of pages kmem_arena will have.
	 */
	kmeminit_nkmempages();

	/*
	 * Figure out how many PTE's are necessary to map the kernel.
	 * We also reserve space for kmem_alloc_pageable() for vm_fork().
	 */

	/* Get size of buffer cache and set an upper limit */
	buf_setvalimit((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 8);
	bufsz = buf_memcalc();
	buf_setvalimit(bufsz);

	sysmap_size = (VM_PHYS_SIZE + (ubc_nwins << ubc_winshift) +
	    bufsz + 16 * NCARGS + pager_map_size) / NBPG +
	    (maxproc * UPAGES) + nkmempages;
#ifdef DEBUG
	{
		extern int kmem_guard_depth;
		sysmap_size += kmem_guard_depth;
	}
#endif

#ifdef SYSVSHM
	sysmap_size += shminfo.shmall;
#endif
#ifdef KSEG2IOBUFSIZE
	sysmap_size += (KSEG2IOBUFSIZE >> PGSHIFT);
#endif
#ifdef _LP64
	/*
	 * If we are using tmpfs, then we might want to use a great deal of
	 * our memory with it.  Make sure we have enough VM to do that.
	 */
	sysmap_size += physmem;
#else
	/* XXX: else runs out of space on 256MB sbmips!! */
	sysmap_size += 20000;
#endif
	/* Round up to an even number of PTE page tables */
	sysmap_size = (sysmap_size + NPTEPG - 1) & -NPTEPG;

	/*
	 * Initialize `FYI' variables.	Note we're relying on
	 * the fact that BSEARCH sorts the vm_physmem[] array
	 * for us.  Must do this before uvm_pageboot_alloc()
	 * can be called.
	 */
	pmap_limits.avail_start = ptoa(VM_PHYSMEM_PTR(0)->start);
	pmap_limits.avail_end = ptoa(VM_PHYSMEM_PTR(vm_nphysseg - 1)->end);
	pmap_limits.virtual_end = pmap_limits.virtual_start + (vaddr_t)sysmap_size * NBPG;

#ifndef _LP64
	if (pmap_limits.virtual_end > VM_MAX_KERNEL_ADDRESS
	    || pmap_limits.virtual_end < VM_MIN_KERNEL_ADDRESS) {
		printf("%s: changing last kernel VA from %#"PRIxVADDR
		    " to %#"PRIxVADDR"\n", __func__,
		    pmap_limits.virtual_end, VM_MAX_KERNEL_ADDRESS);
		pmap_limits.virtual_end = VM_MAX_KERNEL_ADDRESS;
		sysmap_size =
		    (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / NBPG;
	}
#endif
	pmap_pvlist_lock_init(mips_cache_info.mci_pdcache_line_size);

	/*
	 * Now actually allocate the kernel PTE array (must be done
	 * after pmap_limits.virtual_end is initialized).
	 */
	sysmap = (pt_entry_t *)
	    uvm_pageboot_alloc(sizeof(pt_entry_t) * sysmap_size);

	vaddr_t va = VM_MIN_KERNEL_ADDRESS;
#ifdef _LP64
	/*
	 * Do we need more than one XSEG's worth of virtual address space?
	 * If so, we have to allocate the additional pmap_segtab_t's for them
	 * and insert them into the kernel's top level segtab.
	 */
	const size_t xsegs = (sysmap_size * NBPG + NBXSEG - 1) / NBXSEG;
	if (xsegs > 1) {
		printf("%s: %zu xsegs required for %zu pages\n",
		    __func__, xsegs, sysmap_size);
		pmap_segtab_t *stp = (pmap_segtab_t *)
		    uvm_pageboot_alloc(sizeof(pmap_segtab_t) * (xsegs - 1));
		for (size_t i = 1; i <= xsegs; i++, stp++) {
			pmap_kern_segtab.seg_seg[i] = stp;
		}
	}
	pmap_segtab_t ** const xstp = pmap_kern_segtab.seg_seg;
#else
	const size_t xsegs = 1;
	pmap_segtab_t * const stp = &pmap_kern_segtab;
#endif
	KASSERT(curcpu()->ci_pmap_kern_segtab == &pmap_kern_segtab);

	for (size_t k = 0, i = 0; k < xsegs; k++) {
#ifdef _LP64
		pmap_segtab_t * const stp =
		    xstp[(va >> XSEGSHIFT) & (NSEGPG - 1)];
#endif
		bool done = false;

		for (size_t j = (va >> SEGSHIFT) & (NSEGPG - 1);
		     !done && i < sysmap_size;
		     i += NPTEPG, j++, va += NBSEG) {
			/*
			 * Now set the page table pointer...
			 */
			stp->seg_tab[j] = &sysmap[i];
#ifdef _LP64
			/*
			 * If we are at end of this XSEG, terminate the loop
			 * so we advance to the next one.
			 */
			done = (j + 1 == NSEGPG);
#endif
		}
	}
	KASSERT(pmap_pte_lookup(pmap_kernel(), VM_MIN_KERNEL_ADDRESS) == sysmap);

	/*
	 * Initialize the pools.
	 */
	pool_init(&pmap_pmap_pool, PMAP_SIZE, 0, 0, 0, "pmappl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
	    &pmap_pv_page_allocator, IPL_NONE);

	tlb_set_asid(0);

#ifdef MIPS3_PLUS	/* XXX mmu XXX */
	/*
	 * The R4?00 stores only one copy of the Global bit in the
	 * translation lookaside buffer for each 2 page entry.
	 * Thus invalid entries must have the Global bit set so
	 * when Entry LO and Entry HI G bits are anded together
	 * they will produce a global bit to store in the tlb.
	 */
	if (MIPS_HAS_R4K_MMU) {
		while (sysmap_size-- > 0) {
			*sysmap++ = MIPS3_PG_G;
		}
	}
#endif	/* MIPS3_PLUS */
}

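/*
 * Allocate the per-CPU source/destination VA ranges used for ephemeral
 * mappings.  Only needed when some memory cannot be direct-mapped or when
 * the caches are virtually aliased, so pages can be mapped at a compatible
 * cache color.
 */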
void
pmap_md_alloc_ephemeral_address_space(struct cpu_info *ci)
{
	struct mips_cache_info * const mci = &mips_cache_info;

	/*
	 * If we have more memory than can be mapped by KSEG0, we need to
	 * allocate enough VA so we can map pages with the right color
	 * (to avoid cache alias problems).
	 */
	if (false
#ifndef _LP64
	    || pmap_limits.avail_end > MIPS_KSEG1_START - MIPS_KSEG0_START
#endif
	    || MIPS_CACHE_VIRTUAL_ALIAS
	    || MIPS_ICACHE_VIRTUAL_ALIAS) {
		vsize_t size = uvmexp.ncolors * PAGE_SIZE;
		if (MIPS_ICACHE_VIRTUAL_ALIAS
		    && mci->mci_picache_way_size > size)
			size = mci->mci_picache_way_size;
		ci->ci_pmap_dstbase = uvm_km_alloc(kernel_map, size, 0,
		    UVM_KMF_COLORMATCH | UVM_KMF_VAONLY);
		KASSERT(ci->ci_pmap_dstbase);
		ci->ci_pmap_srcbase = uvm_km_alloc(kernel_map, size, 0,
		    UVM_KMF_COLORMATCH | UVM_KMF_VAONLY);
		KASSERT(ci->ci_pmap_srcbase);
	}
}

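/*
 * Machine-dependent pmap initialization, called once the VM system is up.
 */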
void
pmap_md_init(void)
{
	pmap_md_alloc_ephemeral_address_space(curcpu());

#if defined(MIPS3) && 0
	if (MIPS_HAS_R4K_MMU) {
		/*
		 * XXX
		 * Disable sosend_loan() in src/sys/kern/uipc_socket.c
		 * on MIPS3 CPUs to avoid possible virtual cache aliases
		 * and uncached mappings in pmap_enter_pv().
		 *
		 * Ideally, read only shared mapping won't cause aliases
		 * so pmap_enter_pv() should handle any shared read only
		 * mappings without uncached ops like ARM pmap.
		 *
		 * On the other hand, R4000 and R4400 have the virtual
		 * coherency exceptions which will happen even on read only
		 * mappings, so we always have to disable sosend_loan()
		 * on such CPUs.
		 */
		sock_loan_thresh = -1;
	}
#endif
}

/*
 * XXXJRT -- need a version for each cache type.
 */
void
pmap_procwr(struct proc *p, vaddr_t va, size_t len)
{
	if (MIPS_HAS_R4K_MMU) {
		/*
		 * XXX
		 * shouldn't need to do this for physical d$?
		 * should need to do this for virtual i$ if prot == EXEC?
		 */
		if (p == curlwp->l_proc
		    && mips_cache_info.mci_pdcache_way_mask < PAGE_SIZE)
		    /* XXX check icache mask too? */
			mips_icache_sync_range(va, len);
		else
			mips_icache_sync_range_index(va, len);
	} else {
		pmap_t pmap = p->p_vmspace->vm_map.pmap;
		kpreempt_disable();
		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
		pt_entry_t entry = (ptep != NULL ? *ptep : 0);
		kpreempt_enable();
		if (!pte_valid_p(entry))
			return;

		mips_icache_sync_range(
		    MIPS_PHYS_TO_KSEG0(pte_to_paddr(entry) + (va & PGOFSET)),
		    len);
	}
}

/*
 *	pmap_zero_page zeros the specified page.
 */
void
pmap_zero_page(paddr_t dst_pa)
{
	pt_entry_t dst_pte;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pa=%#"PRIxPADDR")", dst_pa, 0, 0, 0);
	PMAP_COUNT(zeroed_pages);

	struct vm_page * const dst_pg = PHYS_TO_VM_PAGE(dst_pa);

	const register_t dst_va = pmap_md_map_ephemeral_page(dst_pg, false,
	    VM_PROT_READ|VM_PROT_WRITE, &dst_pte);

	mips_pagezero(dst_va);

	pmap_md_unmap_ephemeral_page(dst_pg, false, dst_va, dst_pte);

	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
}

/*
 *	pmap_copy_page copies the specified page.
 */
void
pmap_copy_page(paddr_t src_pa, paddr_t dst_pa)
{
	pt_entry_t src_pte, dst_pte;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(src_pa=%#lx, dst_pa=%#lx)", src_pa, dst_pa, 0, 0);
	PMAP_COUNT(copied_pages);

	struct vm_page * const src_pg = PHYS_TO_VM_PAGE(src_pa);
	struct vm_page * const dst_pg = PHYS_TO_VM_PAGE(dst_pa);

	const register_t src_va = pmap_md_map_ephemeral_page(src_pg, false,
	    VM_PROT_READ, &src_pte);

	KASSERT(VM_PAGE_TO_MD(dst_pg)->mdpg_first.pv_pmap == NULL);
	const register_t dst_va = pmap_md_map_ephemeral_page(dst_pg, false,
	    VM_PROT_READ|VM_PROT_WRITE, &dst_pte);

	mips_pagecopy(dst_va, src_va);

	pmap_md_unmap_ephemeral_page(dst_pg, false, dst_va, dst_pte);
	pmap_md_unmap_ephemeral_page(src_pg, false, src_va, src_pte);

	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
}

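/*
 * Synchronize the instruction cache for a page that has been modified and
 * may be executed, unless the CPU has coherent I/D caches.  On
 * MULTIPROCESSOR kernels pmap_tlb_syncicache() propagates the sync to the
 * CPUs in "onproc".
 */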
void
pmap_md_page_syncicache(struct vm_page *pg, const kcpuset_t *onproc)
{
	struct mips_options * const opts = &mips_options;
	if (opts->mips_cpu_flags & CPU_MIPS_I_D_CACHE_COHERENT)
		return;

	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
	pv_entry_t pv = &mdpg->mdpg_first;
	const vaddr_t va = trunc_page(pv->pv_va);

	/*
	 * If onproc is empty, we could do a
	 * pmap_page_protect(pg, VM_PROT_NONE) and remove all
	 * mappings of the page and clear its execness.  Then
	 * the next time the page is faulted, it will get its
	 * icache synced.  But this is easier. :)
	 */
	if (MIPS_HAS_R4K_MMU) {
		if (VM_PAGEMD_CACHED_P(mdpg)) {
			mips_icache_sync_range_index(va, PAGE_SIZE);
		}
	} else {
		mips_icache_sync_range(MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(pg)),
		    PAGE_SIZE);
	}
#ifdef MULTIPROCESSOR
	pmap_tlb_syncicache(va, onproc);
#endif
}

struct vm_page *
pmap_md_alloc_poolpage(int flags)
{
	/*
	 * On 32bit kernels, we must make sure that we only allocate pages that
	 * can be mapped via KSEG0.  On 64bit kernels, try to allocate from
	 * the first 4G.  If all memory is in KSEG0/4G, then we can just
	 * use the default freelist; otherwise we must use the pool page list.
	 */
	if (mips_poolpage_vmfreelist != VM_FREELIST_DEFAULT)
		return uvm_pagealloc_strat(NULL, 0, NULL, flags,
		    UVM_PGA_STRAT_ONLY, mips_poolpage_vmfreelist);

	return uvm_pagealloc(NULL, 0, NULL, flags);
}

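/*
 * Return the direct-mapped address for a pool page and record it as the
 * page's last mapping, flushing the cache first if the previous mapping
 * would alias badly with the new one.
 */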
vaddr_t
pmap_md_map_poolpage(paddr_t pa, size_t len)
{

	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
	vaddr_t va = pmap_md_pool_phystov(pa);
	KASSERT(cold || pg != NULL);
	if (pg != NULL) {
		struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
		pv_entry_t pv = &mdpg->mdpg_first;
		vaddr_t last_va = trunc_page(pv->pv_va);

		KASSERT(pv->pv_pmap == NULL);

		/*
		 * If this page was last mapped with an address that
		 * might cause aliases, flush the page from the cache.
		 */
		if (MIPS_CACHE_VIRTUAL_ALIAS
		    && mips_cache_badalias(last_va, va)) {
			pmap_md_vca_page_wbinv(pg, false);
		}

		pv->pv_va = va;
	}
	return va;
}

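/*
 * Undo pmap_md_map_poolpage(): convert the direct-mapped address back to a
 * physical address and record it as the page's last mapping.
 */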
paddr_t
pmap_md_unmap_poolpage(vaddr_t va, size_t len)
{
	KASSERT(len == PAGE_SIZE);
	KASSERT(pmap_md_direct_mapped_vaddr_p(va));

	const paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);

	KASSERT(pg);

	KASSERT(VM_PAGEMD_CACHED_P(mdpg));
	mdpg->mdpg_first.pv_va = va;
#if 0
	if (MIPS_CACHE_VIRTUAL_ALIAS) {
		/*
		 * We've unmapped a poolpage.  Its contents are irrelevant.
		 */
		KASSERT((va & PAGE_MASK) == 0);
		mips_dcache_inv_range(va, PAGE_SIZE);
		mdpg->mdpg_first.pv_va = va;
	}
#endif

	return pa;
}

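/*
 * Return true if the given virtual address lies in a direct-mapped region
 * (KSEG0, or XKPHYS on non-O32 kernels).
 */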
bool
pmap_md_direct_mapped_vaddr_p(register_t va)
{
#ifndef __mips_o32
	if (MIPS_XKPHYS_P(va))
		return true;
#endif
	return MIPS_KSEG0_P(va);
}

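/*
 * Translate a direct-mapped virtual address back to its physical address;
 * panics if the address is not direct mapped.
 */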
paddr_t
pmap_md_direct_mapped_vaddr_to_paddr(register_t va)
{
	if (MIPS_KSEG0_P(va)) {
		return MIPS_KSEG0_TO_PHYS(va);
	}
#ifndef __mips_o32
	if (MIPS_XKPHYS_P(va)) {
		return MIPS_XKPHYS_TO_PHYS(va);
	}
#endif
	panic("%s: va %#"PRIxREGISTER" not direct mapped!", __func__, va);
}

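/*
 * Return true if the given virtual address maps device (uncached) space,
 * i.e. KSEG1 or an uncached XKPHYS region on LP64 kernels.
 */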
bool
pmap_md_io_vaddr_p(vaddr_t va)
{
#ifdef _LP64
	if (MIPS_XKPHYS_P(va)) {
		return MIPS_XKPHYS_TO_CCA(va) == CCA_UNCACHED;
	}
#endif
	return MIPS_KSEG1_P(va);
}

void
pmap_md_icache_sync_range_index(vaddr_t va, vsize_t len)
{
	mips_icache_sync_range_index(va, len);
}

void
pmap_md_icache_sync_all(void)
{
	mips_icache_sync_all();
}

#ifdef MULTIPROCESSOR
void
pmap_md_tlb_info_attach(struct pmap_tlb_info *ti, struct cpu_info *ci)
{
	if (ci->ci_index != 0)
		return;
	const u_int icache_way_pages =
	    mips_cache_info.mci_picache_way_size >> PGSHIFT;

	KASSERT(icache_way_pages <= 8*sizeof(pmap_tlb_synci_page_mask));
	pmap_tlb_synci_page_mask = icache_way_pages - 1;
	pmap_tlb_synci_map_mask = ~(~0 << icache_way_pages);
	printf("tlb0: synci page mask %#x and map mask %#x used for %u pages\n",
	    pmap_tlb_synci_page_mask, pmap_tlb_synci_map_mask, icache_way_pages);
}
#endif


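/*
 * TLB walker callback: verify that a TLB entry for the given pmap agrees
 * with the corresponding page table entry (modulo software-only bits).
 * Intended for TLB/page-table consistency checks via tlb_walk().
 */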
bool
pmap_md_tlb_check_entry(void *ctx, vaddr_t va, tlb_asid_t asid, pt_entry_t pte)
{
	pmap_t pm = ctx;
	struct pmap_tlb_info * const ti = cpu_tlb_info(curcpu());
	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);

	if (asid != pai->pai_asid)
		return true;
	if (!pte_valid_p(pte)) {
		KASSERT(MIPS_HAS_R4K_MMU);
		KASSERTMSG(pte == MIPS3_PG_G, "va %#"PRIxVADDR" pte %#"PRIxPTE,
		    va, pte_value(pte));
		return true;
	}

	const pt_entry_t * const ptep = pmap_pte_lookup(pm, va);
	KASSERTMSG(ptep != NULL, "va %#"PRIxVADDR" asid %u pte %#"PRIxPTE,
	    va, asid, pte_value(pte));
	const pt_entry_t opte = *ptep;
	pt_entry_t xpte = opte;
	if (MIPS_HAS_R4K_MMU) {
		xpte &= ~(MIPS3_PG_WIRED|MIPS3_PG_RO);
	} else {
		xpte &= ~(MIPS1_PG_WIRED|MIPS1_PG_RO);
	}

	KASSERTMSG(pte == xpte,
	    "pmap=%p va=%#"PRIxVADDR" asid=%u: TLB pte (%#"PRIxPTE
	    ") != real pte (%#"PRIxPTE"/%#"PRIxPTE") @ %p",
	    pm, va, asid, pte_value(pte), pte_value(xpte), pte_value(opte),
	    ptep);

	return true;
}

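/*
 * Walk every TLB entry and invoke "func" on each valid lo0/lo1 mapping,
 * passing the VA, ASID, and PTE.  The walk stops early if "func" returns
 * false.  Preemption is disabled while the TLB is read.
 */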
void
tlb_walk(void *ctx, tlb_walkfunc_t func)
{
	kpreempt_disable();
	for (size_t i = 0; i < mips_options.mips_num_tlb_entries; i++) {
		struct tlbmask tlbmask;
		tlb_asid_t asid;
		vaddr_t va;
		tlb_read_entry(i, &tlbmask);
		if (MIPS_HAS_R4K_MMU) {
			asid = __SHIFTOUT(tlbmask.tlb_hi, MIPS3_PG_ASID);
			va = tlbmask.tlb_hi & MIPS3_PG_HVPN;
		} else {
			asid = __SHIFTOUT(tlbmask.tlb_hi, MIPS1_TLB_PID);
			va = tlbmask.tlb_hi & MIPS1_PG_FRAME;
		}
		if ((pt_entry_t)tlbmask.tlb_lo0 != 0) {
			pt_entry_t pte = tlbmask.tlb_lo0;
			tlb_asid_t asid0 = (pte_global_p(pte) ? KERNEL_PID : asid);
			if (!(*func)(ctx, va, asid0, pte))
				break;
		}
#if (PGSHIFT & 1) == 0
		if (MIPS_HAS_R4K_MMU && (pt_entry_t)tlbmask.tlb_lo1 != 0) {
			pt_entry_t pte = tlbmask.tlb_lo1;
			tlb_asid_t asid1 = (pte_global_p(pte) ? KERNEL_PID : asid);
			if (!(*func)(ctx, va + MIPS3_PG_ODDPG, asid1, pte))
				break;
		}
#endif
	}
	kpreempt_enable();
}

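/*
 * Called when a new mapping of a page is being entered and the page already
 * has other mappings, to resolve virtual cache alias conflicts.  Depending
 * on PMAP_NO_PV_UNCACHED this either removes the conflicting user mappings
 * or marks the page uncached.  Returns true only in the PMAP_NO_PV_UNCACHED
 * case after removing mappings (the PV list lock was dropped and retaken).
 */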
bool
pmap_md_vca_add(struct vm_page *pg, vaddr_t va, pt_entry_t *ptep)
{
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
	if (!MIPS_HAS_R4K_MMU || !MIPS_CACHE_VIRTUAL_ALIAS)
		return false;

	/*
	 * There is at least one other VA mapping this page.
	 * Check if they are cache index compatible.
	 */

	KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
	pv_entry_t pv = &mdpg->mdpg_first;
#if defined(PMAP_NO_PV_UNCACHED)
	/*
	 * Instead of mapping uncached, which some platforms
	 * cannot support, remove incompatible mappings from other pmaps.
	 * When this address is touched again, UVM will
	 * fault it in.  Because of this, each page will only
	 * be mapped with one index at any given time.
	 *
	 * We need to deal with all entries on the list - if the first is
	 * incompatible with the new mapping then they all will be.
	 */
	if (__predict_true(!mips_cache_badalias(pv->pv_va, va))) {
		return false;
	}
	for (pv_entry_t npv = pv; npv; npv = npv->pv_next) {
		if (npv->pv_va & PV_KENTER)
			continue;
		vaddr_t nva = trunc_page(npv->pv_va);
		pmap_t npm = npv->pv_pmap;
		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
		pmap_remove(npm, nva, nva + PAGE_SIZE);
		pmap_update(npm);
		(void)VM_PAGEMD_PVLIST_LOCK(mdpg);
	}
	return true;
#else	/* !PMAP_NO_PV_UNCACHED */
	if (VM_PAGEMD_CACHED_P(mdpg)) {
		/*
		 * If this page is cached, then all mappings
		 * have the same cache alias so we only need
		 * to check the first page to see if it's
		 * incompatible with the new mapping.
		 *
		 * If the mappings are incompatible, map this
		 * page as uncached and re-map all the current
		 * mappings as uncached until all pages can
		 * share the same cache index again.
		 */
		if (mips_cache_badalias(pv->pv_va, va)) {
			pmap_page_cache(pg, false);
			pmap_md_vca_page_wbinv(pg, true);
			*ptep = pte_cached_change(*ptep, false);
			PMAP_COUNT(page_cache_evictions);
		}
	} else {
		*ptep = pte_cached_change(*ptep, false);
		PMAP_COUNT(page_cache_evictions);
	}
	return false;
#endif	/* !PMAP_NO_PV_UNCACHED */
}

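/*
 * Flush a page's data cache contents when a page with possible virtual
 * aliases must be written back (and invalidated).  Only PMAP_WB and
 * PMAP_WBINV are handled; PMAP_INV is not expected here.
 */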
void
pmap_md_vca_clean(struct vm_page *pg, int op)
{
	if (!MIPS_HAS_R4K_MMU || !MIPS_CACHE_VIRTUAL_ALIAS)
		return;

	KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(VM_PAGE_TO_MD(pg)));

	if (op == PMAP_WB || op == PMAP_WBINV) {
		pmap_md_vca_page_wbinv(pg, true);
	} else if (op == PMAP_INV) {
		KASSERT(op == PMAP_INV && false);
		//mips_dcache_inv_range_index(va, PAGE_SIZE);
	}
}

/*
 * In the PMAP_NO_PV_UNCACHED case, all conflicts are resolved at mapping
 * time, so nothing needs to be done on removal.
 */
void
pmap_md_vca_remove(struct vm_page *pg, vaddr_t va, bool dirty, bool last)
{
#if !defined(PMAP_NO_PV_UNCACHED)
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
	if (!MIPS_HAS_R4K_MMU
	    || !MIPS_CACHE_VIRTUAL_ALIAS
	    || !VM_PAGEMD_UNCACHED_P(mdpg))
		return;

	KASSERT(kpreempt_disabled());
	KASSERT(!VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
	KASSERT((va & PAGE_MASK) == 0);

	/*
	 * The page is currently uncached; check whether the alias mappings
	 * have been removed.  If they have, re-enable caching.
	 */
	(void)VM_PAGEMD_PVLIST_READLOCK(mdpg);
	pv_entry_t pv = &mdpg->mdpg_first;
	pv_entry_t pv0 = pv->pv_next;

	for (; pv0; pv0 = pv0->pv_next) {
		if (mips_cache_badalias(pv->pv_va, pv0->pv_va))
			break;
	}
	if (pv0 == NULL)
		pmap_page_cache(pg, true);
	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
#endif
}

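/*
 * Convert a direct-mapped pool page address to its physical address.
 */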
paddr_t
pmap_md_pool_vtophys(vaddr_t va)
{
#ifdef _LP64
	if (MIPS_XKPHYS_P(va))
		return MIPS_XKPHYS_TO_PHYS(va);
#endif
	KASSERT(MIPS_KSEG0_P(va));
	return MIPS_KSEG0_TO_PHYS(va);
}

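/*
 * Convert a physical address to the direct-mapped virtual address used for
 * pool pages: cached XKPHYS on LP64 kernels for high addresses, otherwise
 * KSEG0.
 */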
vaddr_t
pmap_md_pool_phystov(paddr_t pa)
{
#ifdef _LP64
	if ((pa & ~MIPS_PHYS_MASK) != 0) {
		KASSERT(mips_options.mips3_xkphys_cached);
		return MIPS_PHYS_TO_XKPHYS_CACHED(pa);
	}
#else
	KASSERT((pa & ~MIPS_PHYS_MASK) == 0);
#endif
	return MIPS_PHYS_TO_KSEG0(pa);
}