/*	$NetBSD: vm_machdep.c,v 1.70 2011/02/01 01:54:14 uwe Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc. All rights reserved.
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 */

/*-
 * Copyright (c) 1995 Charles M. Hannum.  All rights reserved.
 * Copyright (c) 1989, 1990 William Jolitz
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 */

/*
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.70 2011/02/01 01:54:14 uwe Exp $");

#include "opt_kstack_debug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/core.h>
#include <sys/exec.h>
#include <sys/ptrace.h>
#include <sys/syscall.h>
#include <sys/ktrace.h>
#include <sys/kmem.h>		/* kmem_free() in startlwp() */

#include <uvm/uvm_extern.h>

#include <sh3/locore.h>
#include <sh3/cpu.h>
#include <sh3/pcb.h>
#include <sh3/mmu.h>
#include <sh3/cache.h>
#include <sh3/userret.h>

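/*
 * lwp_trampoline() is the assembly stub a newly created lwp starts in
 * on its first switch: it calls lwp_startup() with the "new" lwp and
 * then the hook function/argument planted in the switchframe below.
 */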
extern void lwp_trampoline(void);

static void sh3_setup_uarea(struct lwp *);


/*
 * Finish a fork operation, with lwp l2 nearly set up.  Copy and
 * update the pcb and trap frame, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * lwp_trampoline() and call child_return() with l2 as an argument.
 * This causes the newly-created lwp to go directly to user level with
 * an apparent return value of 0 from fork(), while the parent lwp
 * returns normally.
 *
 * l1 is the lwp being forked; if l1 == &lwp0, we are creating a
 * kernel thread, and the return path and argument are specified with
 * `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack
 * pointer accordingly.
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack,
    size_t stacksize, void (*func)(void *), void *arg)
{
	struct pcb *pcb;
	struct switchframe *sf;

#if 0 /* FIXME: probably wrong for yamt-idlelwp */
	KDASSERT(l1 == curlwp || l1 == &lwp0);
#endif

	sh3_setup_uarea(l2);

	l2->l_md.md_flags = l1->l_md.md_flags;
	l2->l_md.md_astpending = 0;

	/* Copy the user context; the child may be given a different stack */
	memcpy(l2->l_md.md_regs, l1->l_md.md_regs, sizeof(struct trapframe));
	if (stack != NULL)
		l2->l_md.md_regs->tf_r15 = (u_int)stack + stacksize;

	/* When l2 is switched to, jump to the trampoline */
	pcb = lwp_getpcb(l2);
	sf = &pcb->pcb_sf;
	sf->sf_pr  = (int)lwp_trampoline;
	sf->sf_r10 = (int)l2;	/* "new" lwp for lwp_startup() */
	sf->sf_r11 = (int)arg;	/* hook function/argument */
	sf->sf_r12 = (int)func;
}


/*
 * Reset the stack pointer for the lwp and arrange for it to call the
 * specified function with the specified argument on next switch.
 */
void
cpu_setfunc(struct lwp *l, void (*func)(void *), void *arg)
{
	struct pcb *pcb = lwp_getpcb(l);
	struct switchframe *sf = &pcb->pcb_sf;

	sh3_setup_uarea(l);

	l->l_md.md_regs->tf_ssr = PSL_USERSET;

	/* When lwp is switched to, jump to the trampoline */
	sf->sf_pr  = (int)lwp_trampoline;
	sf->sf_r10 = (int)l;	/* "new" lwp for lwp_startup() */
	sf->sf_r11 = (int)arg;	/* hook function/argument */
	sf->sf_r12 = (int)func;
}

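/*
 * Common setup shared by cpu_lwp_fork() and cpu_setfunc() above:
 * initialize the lwp's PCB, trapframe pointer and switchframe so
 * that the lwp can be switched to.
 */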
static void
sh3_setup_uarea(struct lwp *l)
{
	struct pcb *pcb;
	struct trapframe *tf;
	struct switchframe *sf;
	vaddr_t uv, spbase, fptop;
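/*
 * P1ADDR(x): look up the kernel PTE for virtual address x and turn the
 * physical page it maps into a P1-segment (cached, untranslated)
 * address, so the result can be accessed without taking TLB misses.
 */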
#define	P1ADDR(x)	(SH3_PHYS_TO_P1SEG(*__pmap_kpte_lookup(x) & PG_PPN))

	pcb = lwp_getpcb(l);
	pcb->pcb_onfault = NULL;
	pcb->pcb_faultbail = 0;
#ifdef SH3
	/*
	 * Accessing the context store space must not cause exceptions.
	 * SH4 can create wired TLB entries, so a P3 address for the PCB
	 * is fine.  SH3 cannot, so we have to convert it to P1.  The
	 * P3-to-P1 conversion does not introduce virtual aliasing.
	 */
	if (CPU_IS_SH3)
		pcb = (struct pcb *)P1ADDR((vaddr_t)pcb);
#endif /* SH3 */
	l->l_md.md_pcb = pcb;

	/* stack for trapframes */
	fptop = (vaddr_t)pcb + PAGE_SIZE;
	tf = (struct trapframe *)fptop - 1;
	l->l_md.md_regs = tf;

	/* set up the kernel stack pointer */
	uv = uvm_lwp_getuarea(l);
	spbase = uv + PAGE_SIZE;
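	/*
	 * Layout, as set up above and below: the first page of the
	 * USPACE-byte u-area holds the PCB, with the trapframe stack
	 * growing down from fptop; the remaining pages, starting at
	 * spbase, are the kernel stack proper.
	 */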
#ifdef P1_STACK
	/*
	 * wbinv u-area to avoid cache-aliasing, since kernel stack
	 * is accessed from P1 instead of P3.
	 */
	if (SH_HAS_VIRTUAL_ALIAS)
		sh_dcache_wbinv_range(uv, USPACE);
	spbase = P1ADDR(spbase);
#else /* !P1_STACK */
#ifdef SH4
	/* Prepare u-area PTEs */
	if (CPU_IS_SH4)
		sh4_switch_setup(l);
#endif
#endif /* !P1_STACK */

#ifdef KSTACK_DEBUG
	/* Fill magic number for tracking */
	memset((char *)fptop - PAGE_SIZE + sizeof(struct pcb), 0x5a,
	    PAGE_SIZE - sizeof(struct pcb));
	memset((char *)spbase, 0xa5, (USPACE - PAGE_SIZE));
	memset(&pcb->pcb_sf, 0xb4, sizeof(struct switchframe));
#endif /* KSTACK_DEBUG */

	/* Setup kernel stack and trapframe stack */
	sf = &pcb->pcb_sf;
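	/*
	 * The r6_bank/r7_bank slots hold the values the switch code
	 * loads into the banked registers: the trapframe pointer and
	 * the top of the kernel stack; r15, the initial kernel stack
	 * pointer, starts out at the stack top as well.
	 */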
	sf->sf_r6_bank = (vaddr_t)tf;
	sf->sf_r7_bank = spbase + USPACE - PAGE_SIZE;
	sf->sf_r15 = sf->sf_r7_bank;

	/*
	 * Enable interrupts when the switch frame is restored, since a
	 * kernel thread begins to run without restoring a trapframe.
	 */
	sf->sf_sr = PSL_MD;	/* kernel mode, interrupts enabled */
}


/*
 * fork & co. pass this routine to newlwp() to finish off child
 * creation (see cpu_lwp_fork() above and lwp_trampoline() for
 * details).
 *
 * When this function returns, the new lwp returns to user mode.
 */
void
child_return(void *arg)
{
	struct lwp *l = arg;
	struct trapframe *tf = l->l_md.md_regs;

	tf->tf_r0 = 0;		/* fork(2) returns 0 in child */
	tf->tf_ssr |= PSL_TBIT; /* syscall succeeded */

	userret(l);
	ktrsysret(SYS_fork, 0, 0);
}

/*
 * struct emul e_startlwp (for _lwp_create(2))
 */
void
startlwp(void *arg)
{
	ucontext_t *uc = arg;
	lwp_t *l = curlwp;
	int error;

	error = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
	KASSERT(error == 0);

	kmem_free(uc, sizeof(ucontext_t));
	userret(l);
}

/*
 * Exit hook
 */
void
cpu_lwp_free(struct lwp *l, int proc)
{

	/* Nothing to do */
}


/*
 * lwp_free() hook
 */
void
cpu_lwp_free2(struct lwp *l)
{

	/* Nothing to do */
}

/*
 * Map an IO request into kernel virtual address space.  Requests fall into
 * one of five categories:
 *
 *	B_PHYS|B_UAREA:	User u-area swap.
 *			Address is relative to start of u-area.
 *	B_PHYS|B_PAGET:	User page table swap.
 *			Address is a kernel VA in usrpt (Usrptmap).
 *	B_PHYS|B_DIRTY:	Dirty page push.
 *			Address is a VA in proc2's address space.
 *	B_PHYS|B_PGIN:	Kernel pagein of user pages.
 *			Address is VA in user's address space.
 *	B_PHYS:		User "raw" IO request.
 *			Address is VA in user's address space.
 *
 * All requests are (re)mapped into kernel VA space via the phys_map
 * (a name with only slightly more meaning than "kernel_map").
 */

void
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t fpa;
	pmap_t kpmap, upmap;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
	bp->b_saveaddr = bp->b_data;
	faddr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
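	/*
	 * Reserve kernel VA only in phys_map (no pages are entered yet);
	 * UVM_KMF_WAITVA means this may sleep until VA becomes available.
	 */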
	taddr = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (void *)(taddr + off);
	/*
	 * The region is locked, so we expect that pmap_extract() will
	 * succeed for every page.
	 * XXX: unwise to expect this in a multithreaded environment.
	 * anything can happen to a pmap between the time we lock a
	 * region, release the pmap lock, and then relock it for
	 * the pmap_extract().
	 *
	 * no need to flush TLB since we expect nothing to be mapped
	 * where we just allocated (TLB will be flushed when our
	 * mapping is removed).
	 */
	upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	kpmap = vm_map_pmap(phys_map);
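	/* Enter a wired kernel mapping for each page of the user buffer. */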
	while (len) {
		pmap_extract(upmap, faddr, &fpa);
		pmap_enter(kpmap, taddr, fpa,
		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	pmap_update(kpmap);
}

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_data.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t addr, off;
	pmap_t kpmap;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);
	kpmap = vm_map_pmap(phys_map);
	pmap_remove(kpmap, addr, addr + len);
	pmap_update(kpmap);
	uvm_km_free(phys_map, addr, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}
392