/*	$NetBSD: vm_machdep.c,v 1.166 2023/02/25 08:41:37 skrll Exp $	*/

/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vm_machdep.c 1.21 91/04/06
 *
 *	@(#)vm_machdep.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.166 2023/02/25 08:41:37 skrll Exp $");

#include "opt_ddb.h"
#include "opt_cputype.h"

#define __PMAP_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/cpu.h>
#include <sys/vnode.h>
#include <sys/core.h>
#include <sys/exec.h>

#include <uvm/uvm.h>

#include <mips/cache.h>
#include <mips/pcb.h>
#include <mips/regnum.h>
#include <mips/locore.h>
#include <mips/pte.h>
#include <mips/psl.h>

paddr_t kvtophys(vaddr_t);	/* XXX */

/*
 * cpu_lwp_fork: Finish a fork operation, with lwp l2 nearly set up.
 * Copy and update the pcb and trapframe, making the child ready to run.
 *
 * The first LWP (l1) is the lwp being forked.  If it is &lwp0, then we are
 * creating a kthread, whose return path and argument are specified
 * with `func' and `arg'.
 *
 * Rig the child's kernel stack so that it will start out in lwp_trampoline()
 * and call child_return() with l2 as an argument. This causes the
 * newly-created child process to go directly to user level with an apparent
 * return value of 0 from fork(), while the parent process returns normally.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize arguments), then set up the user stack
 * pointer accordingly.
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb * const pcb1 = lwp_getpcb(l1);
	struct pcb * const pcb2 = lwp_getpcb(l2);
	struct trapframe *tf;

	KASSERT(l1 == curlwp || l1 == &lwp0);

	KASSERT(l2->l_md.md_ss_addr == 0);
	KASSERT(l2->l_md.md_ss_instr == 0);
	KASSERT(l2->l_md.md_astpending == 0);

	/* Copy the PCB from parent. */
	*pcb2 = *pcb1;

	/*
	 * Copy the trapframe from the parent, so that the return to
	 * userspace will be to the right address, with the correct registers.
	 */
	vaddr_t ua2 = uvm_lwp_getuarea(l2);
	tf = (struct trapframe *)(ua2 + USPACE) - 1;
	*tf = *l1->l_md.md_utf;

	/* If specified, set a different user stack for a child. */
	if (stack != NULL)
		tf->tf_regs[_R_SP] = (intptr_t)stack + stacksize;

	l2->l_md.md_utf = tf;
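	/*
	 * If the u-area may not be direct-mapped, cache the (global) PTEs
	 * that map it in md_upte, with the read-only and wired bits cleared
	 * on R4K-style MMUs, so that the kernel stack can later be mapped
	 * through the TLB.
	 */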
#if (USPACE > PAGE_SIZE) || !defined(_LP64)
	CTASSERT(__arraycount(l2->l_md.md_upte) >= UPAGES);
	for (u_int i = 0; i < __arraycount(l2->l_md.md_upte); i++) {
		l2->l_md.md_upte[i] = 0;
	}
	if (!pmap_md_direct_mapped_vaddr_p(ua2)) {
		pt_entry_t * const pte = pmap_pte_lookup(pmap_kernel(), ua2);
		const uint32_t x = MIPS_HAS_R4K_MMU
		    ? (MIPS3_PG_RO | MIPS3_PG_WIRED)
		    : 0;

		for (u_int i = 0; i < UPAGES; i++) {
			KASSERT(pte_valid_p(pte[i]));
			KASSERT(pte_global_p(pte[i]));
			l2->l_md.md_upte[i] = pte[i] & ~x;
		}
	}
#else
	KASSERT(pmap_md_direct_mapped_vaddr_p(ua2));
#endif
	/*
	 * Rig the kernel stack so that it will start out in lwp_trampoline()
	 * and call child_return() with l2 as an argument.  This causes the
	 * newly-created child process to go directly to user level with an
	 * apparent return value of 0 from fork(), while the parent process
	 * returns normally.
	 */

	pcb2->pcb_context.val[_L_S0] = (intptr_t)func;			/* S0 */
	pcb2->pcb_context.val[_L_S1] = (intptr_t)arg;			/* S1 */
	pcb2->pcb_context.val[MIPS_CURLWP_LABEL] = (intptr_t)l2;	/* T8 */
	pcb2->pcb_context.val[_L_SP] = (intptr_t)tf;			/* SP */
	pcb2->pcb_context.val[_L_RA] =
	   mips_locore_jumpvec.ljv_lwp_trampoline;			/* RA */
#if defined(_LP64) || defined(__mips_n32)
	KASSERT(tf->tf_regs[_R_SR] & MIPS_SR_KX);
	KASSERT(pcb2->pcb_context.val[_L_SR] & MIPS_SR_KX);
#endif
#ifndef MIPS1	/* XXX: broken */
	KASSERTMSG(pcb2->pcb_context.val[_L_SR] & MIPS_SR_INT_IE,
	    "%d.%d %#"PRIxREGISTER,
	    l1->l_proc->p_pid, l1->l_lid,
	    pcb2->pcb_context.val[_L_SR]);
#endif
}

/*
 * Copy machine-dependent (MD) state from proc to proc on a fork.
 * For mips, this is the ABI and the "32-bit process on a 64-bit kernel" flag.
 */
void
cpu_proc_fork(struct proc *p1, struct proc *p2)
{
	p2->p_md.md_abi = p1->p_md.md_abi;
}

void *
cpu_uarea_alloc(bool system)
{
#ifdef PMAP_MAP_POOLPAGE

	struct pglist pglist;
#ifdef _LP64
	const paddr_t high = pmap_limits.avail_end;
#else
	const paddr_t high = MIPS_KSEG1_START - MIPS_KSEG0_START;
	/*
	 * Don't allocate a direct mapped uarea if we aren't allocating for a
	 * system lwp and we have memory that can't be mapped via KSEG0.
	 */
	if (!system && high < pmap_limits.avail_end)
		return NULL;
#endif
	int error;

	/*
	 * Allocate a new physically contiguous uarea which can be
	 * direct-mapped.
	 */
	error = uvm_pglistalloc(USPACE, pmap_limits.avail_start, high,
	    USPACE_ALIGN, 0, &pglist, 1, 1);
	if (error) {
		if (!system)
			return NULL;
		panic("%s: uvm_pglistalloc failed: %d", __func__, error);
	}

	/*
	 * Get the physical address from the first page.
	 */
	const struct vm_page * const pg = TAILQ_FIRST(&pglist);
	KASSERT(pg != NULL);
	const paddr_t __diagused pa = VM_PAGE_TO_PHYS(pg);
	KASSERTMSG(pa >= pmap_limits.avail_start,
	    "pa (%#"PRIxPADDR") < pmap_limits.avail_start (%#"PRIxPADDR")",
	     pa, pmap_limits.avail_start);
	KASSERTMSG(pa < pmap_limits.avail_end,
	    "pa (%#"PRIxPADDR") >= pmap_limits.avail_end (%#"PRIxPADDR")",
	     pa, pmap_limits.avail_end);

	/*
	 * We need to return a direct-mapped VA for the PA of the first (maybe
	 * only) page and call PMAP_MAP_POOLPAGE on all pages in the list, so
	 * that cache aliases are handled correctly.
	 */

	/* Initialise to an unexpected result. */
	vaddr_t va = MIPS_KSEG2_START;
	const struct vm_page *pglv;
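	/*
	 * Walk the list in reverse so that the VA returned by the final
	 * PMAP_MAP_POOLPAGE() call, and therefore left in va, is the one
	 * for the first page.
	 */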
	TAILQ_FOREACH_REVERSE(pglv, &pglist, pglist, pageq.queue) {
		const paddr_t palv = VM_PAGE_TO_PHYS(pglv);
		va = PMAP_MAP_POOLPAGE(palv);
	}

	KASSERT(va != MIPS_KSEG2_START);

	return (void *)va;
#else
	return NULL;
#endif
}

/*
 * Return true if we freed it, false if we didn't.
 */
bool
cpu_uarea_free(void *va)
{
#ifdef PMAP_UNMAP_POOLPAGE
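	/*
	 * Only u-areas that live in a direct-mapped segment (XKPHYS on
	 * _LP64 kernels, KSEG0 otherwise), i.e. those handed out by
	 * cpu_uarea_alloc() above, are freed here; anything else is left
	 * to the generic allocator.
	 */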
#ifdef _LP64
	if (!MIPS_XKPHYS_P(va))
		return false;
#else
	if (!MIPS_KSEG0_P(va))
		return false;
#endif

	vaddr_t valv = (vaddr_t)va;
	for (size_t i = 0; i < UPAGES; i++, valv += NBPG) {
		const paddr_t pa = PMAP_UNMAP_POOLPAGE(valv);
		struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg != NULL);
		uvm_pagefree(pg);
	}

	return true;
#else
	return false;
#endif

}

void
cpu_lwp_free(struct lwp *l, int proc)
{

	(void)l;
}

vaddr_t
cpu_lwp_pc(struct lwp *l)
{
	return l->l_md.md_utf->tf_regs[_R_PC];
}

void
cpu_lwp_free2(struct lwp *l)
{

	(void)l;
}

/*
 * Map a user I/O request into kernel virtual address space.
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t kva;	/* kernel VA of the new mapping */

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	vaddr_t uva = mips_trunc_page(bp->b_data);
	const vaddr_t off = (vaddr_t)bp->b_data - uva;
	len = mips_round_page(off + len);

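	/*
	 * Allocate kernel VA with the same cache color as the user VA so
	 * that the temporary double mapping does not create virtual cache
	 * aliases.
	 */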
	kva = uvm_km_alloc(phys_map, len, atop(uva) & uvmexp.colormask,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
	KASSERT((atop(kva ^ uva) & uvmexp.colormask) == 0);
	bp->b_saveaddr = bp->b_data;
	bp->b_data = (void *)(kva + off);
	struct pmap * const upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
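	/*
	 * Look up the physical address of each user page and enter a wired
	 * kernel mapping for it at the matching offset.
	 */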
	do {
		paddr_t pa;	/* physical address */
		if (pmap_extract(upmap, uva, &pa) == false)
			panic("vmapbuf: null page frame");
		pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE,
		    PMAP_WIRED);
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
		len -= PAGE_SIZE;
	} while (len);
	pmap_update(pmap_kernel());

	return 0;
}

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t kva;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	kva = mips_trunc_page(bp->b_data);
	len = mips_round_page((vaddr_t)bp->b_data - kva + len);
	pmap_kremove(kva, len);
	pmap_update(pmap_kernel());
	uvm_km_free(phys_map, kva, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}

/*
 * Map a (kernel) virtual address to a physical address.
 *
 * The MIPS processor has three distinct kernel address ranges:
 *
 * - kseg0 kernel "virtual address" for the   cached physical address space.
 * - kseg1 kernel "virtual address" for the uncached physical address space.
 * - kseg2 normal kernel "virtual address" mapped via the TLB.
 */
paddr_t
kvtophys(vaddr_t kva)
{
	paddr_t phys;

	if (MIPS_KSEG1_P(kva))
		return MIPS_KSEG1_TO_PHYS(kva);

	if (MIPS_KSEG0_P(kva))
		return MIPS_KSEG0_TO_PHYS(kva);

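	/*
	 * A TLB-mapped kernel address (kseg2/xkseg) is resolved by looking
	 * up its PTE in the kernel pmap.
	 */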
	if (kva >= VM_MIN_KERNEL_ADDRESS) {
		if (kva >= VM_MAX_KERNEL_ADDRESS)
			goto overrun;

		pt_entry_t * const ptep = pmap_pte_lookup(pmap_kernel(), kva);
		if (ptep == NULL)
			goto overrun;
		if (!pte_valid_p(*ptep)) {
			printf("kvtophys: pte not valid for %#"PRIxVADDR"\n",
			    kva);
		}
		phys = pte_to_paddr(*ptep) | (kva & PGOFSET);
		return phys;
	}
#ifdef _LP64
	if (MIPS_XKPHYS_P(kva))
		return MIPS_XKPHYS_TO_PHYS(kva);
#endif
overrun:
	printf("Virtual address %#"PRIxVADDR": cannot map to physical\n", kva);
#ifdef DDB
	Debugger();
	return 0;	/* XXX */
#endif
	panic("kvtophys");
}

/*
 * Make a kernel mapping valid for I/O, e.g. non-cacheable.
 * Alignment and length constraints are as-if NBPG==PAGE_SIZE.
 */
int
ioaccess(vaddr_t vaddr, paddr_t paddr, vsize_t len)
{

	while (len > PAGE_SIZE) {
		pmap_kenter_pa(vaddr, paddr, VM_PROT_WRITE, 0);
		len -= PAGE_SIZE;
		vaddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	if (len) {
		/* We could warn.. */
		pmap_kenter_pa(vaddr, paddr, VM_PROT_WRITE, 0);
	}

	/* BUGBUG should use pmap_enter() instead and check results! */
	return 0;
}

/*
 * Opposite to the above: just forget the mapping.
 */
int
iounaccess(vaddr_t vaddr, vsize_t len)
{

	pmap_kremove(vaddr, len);
	return 0;
}