/*	$NetBSD: vm_machdep.c,v 1.8 2023/05/07 12:41:49 skrll Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.8 2023/05/07 12:41:49 skrll Exp $");

#define _PMAP_PRIVATE

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/cpu.h>
#include <sys/vnode.h>
#include <sys/core.h>
#include <sys/exec.h>

#include <uvm/uvm.h>

#include <dev/mm.h>

#include <riscv/frame.h>
#include <riscv/locore.h>
#include <riscv/machdep.h>
/*
 * cpu_lwp_fork: Finish a fork operation, with lwp l2 nearly set up.
 * Copy and update the pcb and trapframe, making the child ready to run.
 *
 * First LWP (l1) is the lwp being forked.  If it is &lwp0, then we are
 * creating a kthread, where return path and argument are specified
 * with `func' and `arg'.
 *
 * Rig the child's kernel stack so that it starts out in lwp_trampoline()
 * and calls child_return() with l2 as an argument. This causes the
 * newly-created child process to go directly to user level with an apparent
 * return value of 0 from fork(), while the parent process returns normally.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize arguments), then set up the user stack
 * pointer accordingly.
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb * const pcb1 = lwp_getpcb(l1);
	struct pcb * const pcb2 = lwp_getpcb(l2);
	struct trapframe *tf;

	KASSERT(l1 == curlwp || l1 == &lwp0);
	KASSERT(l2->l_md.md_astpending == 0);

	/* Copy the PCB from parent. */
	*pcb2 = *pcb1;

	/*
	 * Copy the trapframe from parent, so that the return to userspace
	 * will be to the right address, with the correct registers.
	 */
	vaddr_t ua2 = uvm_lwp_getuarea(l2);

	tf = (struct trapframe *)(ua2 + USPACE) - 1;
	*tf = *l1->l_md.md_utf;
#ifdef FPE
	tf->tf_sr &= ~SR_FS;	/* floating point must be disabled */
#endif

	/* If specified, set a different user stack for a child. */
	if (stack != NULL) {
		tf->tf_sp = stack_align((intptr_t)stack + stacksize);
	}

	l2->l_md.md_utf = tf;

	/*
	 * Rig the kernel stack so that it starts out in lwp_trampoline()
	 * and calls child_return() with l2 as an argument.  This causes the
	 * newly-created child process to go directly to user level with an
	 * apparent return value of 0 from fork(), while the parent process
	 * returns normally.
	 */
	--tf;	/* cpu_switchto uses trapframes */

	tf->tf_s0 = 0;				/* S0 (aka frame pointer) */
	tf->tf_s1 = (intptr_t)func;		/* S1 */
	tf->tf_s2 = (intptr_t)arg;		/* S2 */
	tf->tf_ra = (intptr_t)lwp_trampoline;	/* RA */

	l2->l_md.md_ktf = tf;			/* SP */

	KASSERT(l2->l_md.md_astpending == 0);
}

/*
 * Routine to copy MD stuff from proc to proc on a fork.
 */
void
cpu_proc_fork(struct proc *p1, struct proc *p2)
{
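	/* Nothing machine-dependent to copy between parent and child. */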
}
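
/*
 * On _LP64 kernels the uarea is allocated as physically contiguous pages
 * and accessed through the pmap's direct map, so no separate kernel
 * virtual mapping has to be set up for it.
 */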
#ifdef _LP64
void *
cpu_uarea_alloc(bool system)
{
	struct pglist pglist;
	int error;

	/*
	 * Allocate a new physically contiguous uarea which can be
	 * direct-mapped.
	 */
	error = uvm_pglistalloc(USPACE, pmap_limits.avail_start,
	    pmap_limits.avail_end, USPACE_ALIGN, 0, &pglist, 1, 1);
	if (error) {
		return NULL;
	}

	/*
	 * Get the physical address from the first page.
	 */
	const struct vm_page * const pg = TAILQ_FIRST(&pglist);
	KASSERT(pg != NULL);
	const paddr_t pa = VM_PAGE_TO_PHYS(pg);
	KASSERTMSG(pa >= pmap_limits.avail_start,
	    "pa (%#"PRIxPADDR") < avail_start (%#"PRIxPADDR")",
	     pa, pmap_limits.avail_start);
	KASSERTMSG(pa + USPACE <= pmap_limits.avail_end,
	    "pa (%#"PRIxPADDR") >= avail_end (%#"PRIxPADDR")",
	     pa, pmap_limits.avail_end);

	/*
	 * we need to return a direct-mapped VA for the pa.
	 */
	return (void *)pmap_md_direct_map_paddr(pa);
}

/*
 * Return true if we freed it, false if we didn't.
 */
bool
cpu_uarea_free(void *va)
{
	if (!pmap_md_direct_mapped_vaddr_p((vaddr_t)va))
		return false;

	paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr((vaddr_t)va);

	for (const paddr_t epa = pa + USPACE; pa < epa; pa += PAGE_SIZE) {
		struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg != NULL);
		uvm_pagefree(pg);
	}
	return true;
}
#endif /* _LP64 */
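
/*
 * cpu_lwp_free: MD cleanup when an lwp exits; nothing to do here.
 */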
void
cpu_lwp_free(struct lwp *l, int proc)
{

	(void)l;
}
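
/*
 * cpu_lwp_pc: Return the lwp's user-space program counter from its
 * saved user trapframe.
 */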
vaddr_t
cpu_lwp_pc(struct lwp *l)
{
	return l->l_md.md_utf->tf_pc;
}
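
/*
 * cpu_lwp_free2: Final MD cleanup once the lwp is being destroyed;
 * nothing to do here.
 */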
void
cpu_lwp_free2(struct lwp *l)
{

	(void)l;
}

/*
 * Map a user I/O request into kernel virtual address space.
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t kva;	/* Kernel VA (new to) */

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	vaddr_t uva = trunc_page((vaddr_t)bp->b_data);
	const vaddr_t off = (vaddr_t)bp->b_data - uva;
	len = round_page(off + len);
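
	/*
	 * Allocate kernel VA with the same cache color as the user VA and
	 * enter a wired mapping for each page of the request.
	 */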
	kva = uvm_km_alloc(phys_map, len, atop(uva) & uvmexp.colormask,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
	KASSERT((atop(kva ^ uva) & uvmexp.colormask) == 0);
	bp->b_saveaddr = bp->b_data;
	bp->b_data = (void *)(kva + off);
	struct pmap * const upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	do {
		paddr_t pa;	/* physical address */
		if (pmap_extract(upmap, uva, &pa) == false)
			panic("vmapbuf: null page frame");
		pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE,
		    PMAP_WIRED);
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
		len -= PAGE_SIZE;
	} while (len);
	pmap_update(pmap_kernel());

	return 0;
}

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t kva;

	KASSERT(bp->b_flags & B_PHYS);

	kva = trunc_page((vaddr_t)bp->b_data);
	len = round_page((vaddr_t)bp->b_data - kva + len);
	pmap_kremove(kva, len);
	pmap_update(pmap_kernel());
	uvm_km_free(phys_map, kva, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}
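
/*
 * mm_md_physacc: Decide whether /dev/mem may access the given physical
 * address; anything below the physical page count is allowed.
 */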
int
mm_md_physacc(paddr_t pa, vm_prot_t prot)
{
	return (atop(pa) < physmem) ? 0 : EFAULT;
}
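
/*
 * mm_md_direct_mapped_phys: Report whether a physical address is covered
 * by the direct map and, if so, optionally return the corresponding
 * virtual address.
 */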
#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
bool
mm_md_direct_mapped_phys(paddr_t pa, vaddr_t *vap)
{
	if (pa >= physical_start && pa <= physical_end) {
		if (vap)
			*vap = pmap_md_direct_map_paddr(pa);
		return true;
	}

	return false;
}
#endif