/* $NetBSD: vm_machdep.c,v 1.1 2014/08/10 05:47:37 matt Exp $ */

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.1 2014/08/10 05:47:37 matt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/cpu.h>
#include <sys/buf.h>
#include <sys/pmc.h>
#include <sys/exec.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <aarch64/locore.h>
#include <aarch64/pcb.h>

/*
 * Special compilation symbols:
 *
 * STACKCHECKS - Fill undefined and supervisor stacks with a known pattern
 *		 on forking and check the pattern on exit, reporting
 *		 the amount of stack used.
 */

void
cpu_proc_fork(struct proc *p1, struct proc *p2)
{
}

/*
 * Finish a fork operation, with LWP l2 nearly set up.
 *
 * Copy and update the pcb and trapframe, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * lwp_trampoline() which will call the specified func with the argument arg.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	const struct pcb * const pcb1 = lwp_getpcb(l1);
	struct pcb * const pcb2 = lwp_getpcb(l2);

#ifdef PMAP_DEBUG
	if (pmap_debug_level > 0)
		printf("cpu_lwp_fork: %p %p %p %p\n", l1, l2, curlwp, &lwp0);
#endif	/* PMAP_DEBUG */

	/* Copy the pcb */
	*pcb2 = *pcb1;

	/*
	 * Disable FP for a newly created LWP but remember if the
	 * FP state is valid.
	 */
	l2->l_md.md_cpacr = CPACR_FPEN_NONE;

	/*
	 * Set up the kernel stack for the process.
	 * Note: this stack is not in use if we are forking from p1
	 */
	vaddr_t uv = uvm_lwp_getuarea(l2);
	struct trapframe * const utf = (struct trapframe *)(uv + USPACE) - 1;
	l2->l_md.md_utf = utf;

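	/* Start the child with a copy of the parent's user register state. */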
	*utf = *l1->l_md.md_utf;

	/*
	 * If specified, give the child a different stack (make sure
	 * it's 16-byte aligned).
	 */
	if (stack != NULL)
		utf->tf_sp = ((vaddr_t)(stack) + stacksize) & -16;

	utf->tf_spsr = SPSR_M_EL0T;		/* for returning to userspace */

	struct trapframe * const ktf = utf - 1;
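	/*
	 * Build the kernel switchframe immediately below the user
	 * trapframe.  cpu_switchto() restores the callee-saved registers
	 * from it, so when the child first runs, lwp_trampoline() finds
	 * func in x27 and arg in x28.
	 */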
	ktf->tf_chain = utf;
	ktf->tf_reg[27] = (uintptr_t)func;
	ktf->tf_reg[28] = (uintptr_t)arg;
	ktf->tf_reg[29] = 0;
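	/* All DAIF exception masks should be clear at this point. */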
	KASSERT(reg_daif_read() == 0);
	ktf->tf_spsr = SPSR_M_EL1T;
	ktf->tf_lr = (uintptr_t)lwp_trampoline;

	l2->l_md.md_ktf = ktf;
}

/*
 * cpu_exit is called as the last action during exit.
 *
 * We clean up a little and then call switch_exit() with the old proc as an
 * argument.  switch_exit() first switches to lwp0's context, and finally
 * jumps into switch() to wait for another process to wake up.
 */

void
cpu_lwp_free(struct lwp *l, int proc)
{
}

void
cpu_lwp_free2(struct lwp *l)
{
}

/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t fpa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

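	/* Remember the user's buffer address so vunmapbuf() can restore it. */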
	bp->b_saveaddr = bp->b_data;
	faddr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
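	/*
	 * Allocate kernel VA with the same cache color as the user VA so
	 * the temporary mapping does not introduce cache aliases.
	 */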
	taddr = uvm_km_alloc(phys_map, len, atop(faddr) & uvmexp.colormask,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
	bp->b_data = (void *)(taddr + off);

	/*
	 * The region is locked, so we expect that pmap_extract() will
	 * always succeed for these addresses.
	 */
	while (len) {
		(void) pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
		    faddr, &fpa);
		pmap_enter(pmap_kernel(), taddr, fpa,
		    VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	return 0;
}

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t addr, off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	/*
	 * Make sure the cache does not have dirty data for the
	 * pages we had mapped.
	 */
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);

	pmap_remove(pmap_kernel(), addr, addr + len);
	pmap_update(pmap_kernel());
	uvm_km_free(phys_map, addr, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}