/*	$NetBSD: vm_machdep.c,v 1.34 2002/09/22 05:42:20 gmcgarry Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc. All rights reserved.
 * Copyright (c) 1995 Charles M. Hannum.  All rights reserved.
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 */

/*
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include "opt_kstack_debug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/user.h>
#include <sys/core.h>
#include <sys/exec.h>
#include <sys/ptrace.h>

#include <uvm/uvm_extern.h>

#include <sh3/locore.h>
#include <sh3/cpu.h>
#include <sh3/reg.h>
#include <sh3/mmu.h>
#include <sh3/cache.h>

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb and trap frame, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * proc_trampoline() and call child_return() with p2 as an
 * argument. This causes the newly-created child process to go
 * directly to user level with an apparent return value of 0 from
 * fork(), while the parent process returns normally.
 *
 * p1 is the process being forked; if p1 == &proc0, we are creating
 * a kernel thread, and the return path and argument are specified with
 * `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 */
void
cpu_fork(struct proc *p1, struct proc *p2, void *stack,
    size_t stacksize, void (*func)(void *), void *arg)
{
	extern void proc_trampoline(void);
	struct pcb *pcb;
	struct trapframe *tf;
	struct switchframe *sf;
	vaddr_t spbase, fptop;
	/* Convert a P3 (mapped kernel) VA to its P1 (unmapped, cached) alias */
#define	P1ADDR(x)	(SH3_PHYS_TO_P1SEG(*__pmap_kpte_lookup(x) & PG_PPN))

	KDASSERT(!(p1 != curproc && p1 != &proc0));

	/* Copy flags */
	p2->p_md.md_flags = p1->p_md.md_flags;

#ifdef SH3
	/*
	 * Convert the frame pointer top to P1.  Since SH3 cannot create
	 * wired TLB entries, accesses to the context store space must not
	 * cause TLB exceptions.  With 4KB pages on SH3, the P3-to-P1
	 * conversion does not introduce virtual aliasing.
	 */
	if (CPU_IS_SH3) {
		pcb = (struct pcb *)P1ADDR((vaddr_t)&p2->p_addr->u_pcb);
		p2->p_md.md_pcb = pcb;
		fptop = (vaddr_t)pcb + NBPG;
	}
#endif /* SH3 */
#ifdef SH4
	/* SH4 can create wired TLB entries, so no P1 conversion is needed. */
	if (CPU_IS_SH4) {
		pcb = &p2->p_addr->u_pcb;
		p2->p_md.md_pcb = pcb;
		fptop = (vaddr_t)pcb + NBPG;
	}
#endif /* SH4 */

	/* Set up the kernel stack pointer */
	spbase = (vaddr_t)p2->p_addr + NBPG;
#ifdef P1_STACK
	/*
	 * Convert the stack base from P3 to P1.  Write back and
	 * invalidate the u-area first to avoid cache aliasing, since
	 * the kernel stack is accessed through P1 instead of P3.
	 */
	if (SH_HAS_VIRTUAL_ALIAS)
		sh_dcache_wbinv_range((vaddr_t)p2->p_addr, USPACE);
	spbase = P1ADDR(spbase);
#else /* P1_STACK */
	/* Prepare u-area PTEs */
#ifdef SH3
	if (CPU_IS_SH3)
		sh3_switch_setup(p2);
#endif
#ifdef SH4
	if (CPU_IS_SH4)
		sh4_switch_setup(p2);
#endif
#endif /* P1_STACK */

#ifdef KSTACK_DEBUG
	/* Fill with magic numbers for stack-usage tracking */
	memset((char *)fptop - NBPG + sizeof(struct user), 0x5a,
	    NBPG - sizeof(struct user));
	memset((char *)spbase, 0xa5, (USPACE - NBPG));
	memset(&pcb->pcb_sf, 0xb4, sizeof(struct switchframe));
#endif /* KSTACK_DEBUG */

	/*
	 * Copy the user context.
	 */
	p2->p_md.md_regs = tf = (struct trapframe *)fptop - 1;
	memcpy(tf, p1->p_md.md_regs, sizeof(struct trapframe));

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf->tf_r15 = (u_int)stack + stacksize;

	/* Set up the switch frame */
	sf = &pcb->pcb_sf;
	sf->sf_r11 = (int)arg;		/* proc_trampoline hook func's arg */
	sf->sf_r12 = (int)func;		/* proc_trampoline hook func */
	sf->sf_r15 = spbase + USPACE - NBPG;	/* current stack pointer */
	sf->sf_r7_bank = sf->sf_r15;	/* stack top */
	sf->sf_r6_bank = (vaddr_t)tf;	/* current frame pointer */
	/* When switched to, jump to proc_trampoline */
	sf->sf_pr = (int)proc_trampoline;
	/*
	 * Enable interrupts when the switch frame is restored, since
	 * kernel threads begin running without restoring a trapframe.
	 */
	sf->sf_sr = PSL_MD;		/* kernel mode, interrupts enabled */
}
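
/*
 * Illustrative only, not part of the original file: a hedged C-level
 * sketch of how the switch frame above is consumed.  cpu_switch restores
 * the callee-saved registers from the switch frame and "returns" through
 * sf_pr into proc_trampoline (in locore), which then effectively does:
 */
#if 0
	void (*hook)(void *) = (void (*)(void *))sf->sf_r12;	/* func */
	void *hookarg = (void *)sf->sf_r11;			/* arg */
	(*hook)(hookarg);	/* e.g. child_return(p2) after a fork() */
#endif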

/*
 * void cpu_exit(struct proc *p):
 *	+ Change the kernel context to proc0's.
 *	+ Schedule freeing of process p's resources.
 *	+ Switch to another process.
 */
void
cpu_exit(struct proc *p)
{
	struct switchframe *sf;

	splsched();
	uvmexp.swtch++;

	/*
	 * Switch to proc0's stack, so we are no longer running on p's
	 * kernel stack once p's resources are scheduled for freeing.
	 */
	curproc = 0;
	curpcb = proc0.p_md.md_pcb;
	sf = &curpcb->pcb_sf;
	__asm__ __volatile__(
		"mov	%0, r15;"	/* current stack */
		"ldc	%1, r6_bank;"	/* current frame pointer */
		"ldc	%2, r7_bank;"	/* stack top */
		::
		"r"(sf->sf_r15),
		"r"(sf->sf_r6_bank),
		"r"(sf->sf_r7_bank));

	/* Schedule freeing of process resources */
	exit2(p);

	cpu_switch(p, NULL);
	/* NOTREACHED */
}

/*
 * Dump the machine-specific segment at the start of a core dump.
 */
struct md_core {
	struct reg intreg;
};

int
cpu_coredump(struct proc *p, struct vnode *vp, struct ucred *cred,
    struct core *chdr)
{
	struct md_core md_core;
	struct coreseg cseg;
	int error;

	CORE_SETMAGIC(*chdr, COREMAGIC, MID_MACHINE, 0);
	chdr->c_hdrsize = ALIGN(sizeof(*chdr));
	chdr->c_seghdrsize = ALIGN(sizeof(cseg));
	chdr->c_cpusize = sizeof(md_core);

	/* Save integer registers. */
	error = process_read_regs(p, &md_core.intreg);
	if (error)
		return error;

	CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_MACHINE, CORE_CPU);
	cseg.c_addr = 0;
	cseg.c_size = chdr->c_cpusize;

	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&cseg, chdr->c_seghdrsize,
	    (off_t)chdr->c_hdrsize, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred,
	    (int *)0, p);
	if (error)
		return error;

	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&md_core, sizeof(md_core),
	    (off_t)(chdr->c_hdrsize + chdr->c_seghdrsize), UIO_SYSSPACE,
	    IO_NODELOCKED|IO_UNIT, cred, (int *)0, p);
	if (error)
		return error;

	chdr->c_nseg++;
	return 0;
}
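
/*
 * Illustrative only, not part of the original file: the layout of the
 * CPU-dependent portion of the core file, as implied by the offsets
 * passed to vn_rdwr() above (the core header itself is written by the
 * machine-independent coredump code):
 *
 *	offset 0				struct core (chdr)
 *	offset c_hdrsize			struct coreseg (cseg)
 *	offset c_hdrsize + c_seghdrsize		struct md_core (registers)
 */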

/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the pmap_kernel().
 */
void
pagemove(caddr_t from, caddr_t to, size_t size)
{
	pt_entry_t *fpte, *tpte;

	if ((size & PGOFSET) != 0)
		panic("pagemove");
	fpte = __pmap_kpte_lookup((vaddr_t)from);
	tpte = __pmap_kpte_lookup((vaddr_t)to);

	if (SH_HAS_VIRTUAL_ALIAS)
		sh_dcache_wbinv_range((vaddr_t)from, size);

	while (size > 0) {
		*tpte++ = *fpte;
		*fpte++ = 0;
		sh_tlb_invalidate_addr(0, (vaddr_t)from);
		sh_tlb_invalidate_addr(0, (vaddr_t)to);
		from += NBPG;
		to += NBPG;
		size -= NBPG;
	}
}
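
/*
 * Illustrative only, not part of the original file: pagemove() relocates
 * mappings, not data.  A minimal sketch, assuming hypothetical
 * page-aligned kernel VAs va1 and va2 with va1 currently mapped:
 */
#if 0
	pagemove((caddr_t)va1, (caddr_t)va2, 2 * NBPG);
	/*
	 * va2 now maps the two physical pages formerly at va1; va1 is
	 * unmapped, and the stale TLB entries have been invalidated.
	 */
#endif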

/*
 * Map an IO request into kernel virtual address space.  Requests fall into
 * one of five categories:
 *
 *	B_PHYS|B_UAREA:	User u-area swap.
 *			Address is relative to start of u-area (p_addr).
 *	B_PHYS|B_PAGET:	User page table swap.
 *			Address is a kernel VA in usrpt (Usrptmap).
 *	B_PHYS|B_DIRTY:	Dirty page push.
 *			Address is a VA in proc2's address space.
 *	B_PHYS|B_PGIN:	Kernel pagein of user pages.
 *			Address is VA in user's address space.
 *	B_PHYS:		User "raw" IO request.
 *			Address is VA in user's address space.
 *
 * All requests are (re)mapped into kernel VA space via the phys_map
 * (a name with only slightly more meaning than "kernel_map").
 */

void
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t fpa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
	bp->b_saveaddr = bp->b_data;
	faddr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	taddr = uvm_km_valloc_wait(phys_map, len);
	bp->b_data = (caddr_t)(taddr + off);
	/*
	 * The region is locked, so we expect that pmap_pte() will return
	 * non-NULL.
	 * XXX: unwise to expect this in a multithreaded environment.
	 * Anything can happen to a pmap between the time we lock a
	 * region, release the pmap lock, and then relock it for
	 * the pmap_extract().
	 *
	 * No need to flush the TLB, since we expect nothing to be mapped
	 * where we just allocated (the TLB will be flushed when our
	 * mapping is removed).
	 */
	while (len) {
		pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
			     faddr, &fpa);
		pmap_enter(vm_map_pmap(phys_map), taddr, fpa,
		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	pmap_update(vm_map_pmap(phys_map));
}

/*
 * Free the I/O map PTEs associated with this I/O operation.
 * We also invalidate the TLB entries and restore the original b_data.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t addr, off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);
	pmap_remove(vm_map_pmap(phys_map), addr, addr + len);
	pmap_update(vm_map_pmap(phys_map));
	uvm_km_free_wakeup(phys_map, addr, len);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}
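
/*
 * Illustrative only, not part of the original file: a hedged sketch of
 * how vmapbuf()/vunmapbuf() bracket a raw I/O transfer in a physio()-style
 * path.  `bp', `len', and `devstrategy' are hypothetical; buffer setup and
 * error handling are omitted:
 */
#if 0
	vmapbuf(bp, len);	/* double-map the user pages into phys_map */
	(*devstrategy)(bp);	/* device does I/O on kernel VA bp->b_data */
	biowait(bp);		/* wait for the transfer to complete */
	vunmapbuf(bp, len);	/* unmap and restore the original b_data */
#endif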
369