/*	$NetBSD: vm_machdep.c,v 1.88 2002/03/05 15:57:20 simonb Exp $	*/

/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vm_machdep.c 1.21 91/04/06
 *
 *	@(#)vm_machdep.c	8.3 (Berkeley) 1/4/94
 */

#include "opt_ddb.h"

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.88 2002/03/05 15:57:20 simonb Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/user.h>
#include <sys/core.h>
#include <sys/exec.h>

#include <uvm/uvm_extern.h>

#include <mips/cache.h>
#include <mips/regnum.h>
#include <mips/locore.h>
#include <mips/pte.h>
#include <mips/psl.h>
#include <machine/cpu.h>

paddr_t kvtophys(vaddr_t);	/* XXX */

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb and trap frame, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * proc_trampoline() and call child_return() with p2 as an
 * argument. This causes the newly-created child process to go
 * directly to user level with an apparent return value of 0 from
 * fork(), while the parent process returns normally.
 *
 * p1 is the process being forked; if p1 == &proc0, we are creating
 * a kernel thread, and the return path and argument are specified with
 * `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 */
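/*
 * Sketch of the child's kernel stack as built below (derived from the
 * code itself, not from any MI contract): the trap frame sits at the
 * very top of the u-area, and the saved SP leaves room for a small
 * call frame beneath it.
 *
 *	p2->p_addr + USPACE ->	+------------------------------+
 *				| struct frame (trap frame)    |
 *	f ->			+------------------------------+
 *				| 24-byte call frame (see the  |
 *				| note at the memset() below)  |
 *	pcb_context[8] (SP) ->	+------------------------------+
 */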
void
cpu_fork(p1, p2, stack, stacksize, func, arg)
	struct proc *p1, *p2;
	void *stack;
	size_t stacksize;
	void (*func)(void *);
	void *arg;
{
	struct pcb *pcb;
	struct frame *f;
	pt_entry_t *pte;
	int i, x;

#ifdef MIPS3_PLUS
	/*
	 * This cache flush operation is necessary to eliminate
	 * virtual aliases created by pmap_zero_page(): a VCED
	 * (virtual coherency exception, data) on the kernel stack
	 * is not allowed.
	 * XXXJRT Confirm that this is necessary, and/or fix
	 * XXXJRT pmap_zero_page().
	 */
	if (CPUISMIPS3 && mips_sdcache_line_size)
		mips_dcache_wbinv_range((vaddr_t) p2->p_addr, USPACE);
#endif

#ifdef DIAGNOSTIC
	/*
	 * Unless we are creating a kernel thread (p1 == &proc0),
	 * p1 must be curproc.
	 */
	if (p1 != curproc && p1 != &proc0)
		panic("cpu_fork: curproc");
#endif
	if ((p1->p_md.md_flags & MDP_FPUSED) && p1 == fpcurproc)
		savefpregs(p1);

	/*
	 * Copy the pcb from proc p1 to p2.
	 * Copy p1's trap frame to the top of p2's stack space, so the
	 * return to user mode goes to the right address with the
	 * correct registers.
	 */
	memcpy(&p2->p_addr->u_pcb, &p1->p_addr->u_pcb, sizeof(struct pcb));
	f = (struct frame *)((caddr_t)p2->p_addr + USPACE) - 1;
	memcpy(f, p1->p_md.md_regs, sizeof(struct frame));
	/* Presumably the minimal call-frame/argument-save area required
	 * by the calling convention; the saved SP below points here. */
	memset((caddr_t)f - 24, 0, 24);

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		f->f_regs[SP] = (u_int)stack + stacksize;

	p2->p_md.md_regs = (void *)f;
	p2->p_md.md_flags = p1->p_md.md_flags & MDP_FPUSED;
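	/*
	 * As in cpu_swapin() below, cache the u-area PTEs in the
	 * machine dependent part of the proc structure so that
	 * cpu_switch() can quickly map in the user struct and kernel
	 * stack.  The mask x strips the global (and, on an R4K-style
	 * MMU, the read-only and wired) bits; presumably the context
	 * switch code re-establishes those itself.
	 */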
	x = (MIPS_HAS_R4K_MMU) ?
	    (MIPS3_PG_G|MIPS3_PG_RO|MIPS3_PG_WIRED) : MIPS1_PG_G;
	pte = kvtopte(p2->p_addr);
	for (i = 0; i < UPAGES; i++)
		p2->p_md.md_upte[i] = pte[i].pt_entry &~ x;

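	/*
	 * Arrange for the child to resume in proc_trampoline() the
	 * first time cpu_switch() picks it.  pcb_context holds the
	 * callee-saved register context restored by the locore switch
	 * code; the indices used here (0 = S0, 1 = S1, 8 = SP,
	 * 10 = RA, 11 = SR) must match the layout that code assumes.
	 */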
	pcb = &p2->p_addr->u_pcb;
	pcb->pcb_context[10] = (int)proc_trampoline;	/* RA */
	pcb->pcb_context[8] = (int)f - 24;		/* SP */
	pcb->pcb_context[0] = (int)func;		/* S0 */
	pcb->pcb_context[1] = (int)arg;			/* S1 */
	pcb->pcb_context[11] |= PSL_LOWIPL;		/* SR */
#ifdef IPL_ICU_MASK
	pcb->pcb_ppl = 0;	/* machine dependent interrupt mask */
#endif
}

/*
 * Finish a swapin operation.
 * We need to update the cached PTEs for the user area in the
 * machine dependent part of the proc structure.
 */
void
cpu_swapin(p)
	struct proc *p;
{
	pt_entry_t *pte;
	int i, x;

	/*
	 * Cache the PTEs for the user area in the machine dependent
	 * part of the proc struct so cpu_switch() can quickly map in
	 * the user struct and kernel stack.
	 */
	x = (MIPS_HAS_R4K_MMU) ?
	    (MIPS3_PG_G|MIPS3_PG_RO|MIPS3_PG_WIRED) : MIPS1_PG_G;
	pte = kvtopte(p->p_addr);
	for (i = 0; i < UPAGES; i++)
		p->p_md.md_upte[i] = pte[i].pt_entry &~ x;
}

/*
 * cpu_exit is called as the last action during exit.
 *
 * We clean up a little and then call switch_exit() with the old proc as an
 * argument.  switch_exit() first switches to proc0's PCB and stack,
 * schedules the dead proc's vmspace and stack to be freed, then jumps
 * into the middle of cpu_switch(), as if it were switching from proc0.
 */
void
cpu_exit(p)
	struct proc *p;
{
	void switch_exit(struct proc *);

	if ((p->p_md.md_flags & MDP_FPUSED) && p == fpcurproc)
		fpcurproc = (struct proc *)0;

	uvmexp.swtch++;
	(void)splhigh();
	switch_exit(p);
	/* NOTREACHED */
}

/*
 * Dump the machine specific segment at the start of a core dump.
 */
int
cpu_coredump(p, vp, cred, chdr)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
	struct core *chdr;
{
	int error;
	struct coreseg cseg;
	struct cpustate {
		struct frame frame;
		struct fpreg fpregs;
	} cpustate;

	CORE_SETMAGIC(*chdr, COREMAGIC, MID_MACHINE, 0);
	chdr->c_hdrsize = ALIGN(sizeof(struct core));
	chdr->c_seghdrsize = ALIGN(sizeof(struct coreseg));
	chdr->c_cpusize = sizeof(struct cpustate);

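	/*
	 * Layout of the core file, as implied by the offsets passed to
	 * vn_rdwr() below (the struct core header itself is written by
	 * the machine independent coredump code):
	 *
	 *	0				struct core
	 *	c_hdrsize			struct coreseg (CPU segment)
	 *	c_hdrsize + c_seghdrsize	struct cpustate
	 */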
	if ((p->p_md.md_flags & MDP_FPUSED) && p == fpcurproc)
		savefpregs(p);
	cpustate.frame = *(struct frame *)p->p_md.md_regs;
	cpustate.fpregs = p->p_addr->u_pcb.pcb_fpregs;

	CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_MACHINE, CORE_CPU);
	cseg.c_addr = 0;
	cseg.c_size = chdr->c_cpusize;
	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&cseg, chdr->c_seghdrsize,
	    (off_t)chdr->c_hdrsize, UIO_SYSSPACE,
	    IO_NODELOCKED|IO_UNIT, cred, NULL, p);
	if (error)
		return error;

	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&cpustate,
			(int)chdr->c_cpusize,
			(off_t)(chdr->c_hdrsize + chdr->c_seghdrsize),
			UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT,
			cred, NULL, p);

	if (!error)
		chdr->c_nseg++;

	return error;
}

/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of NBPG.
 */
void
pagemove(from, to, size)
	caddr_t from, to;
	size_t size;
{
	pt_entry_t *fpte, *tpte;
	paddr_t invalid;

	if (size % NBPG)
		panic("pagemove");
	fpte = kvtopte(from);
	tpte = kvtopte(to);
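	/*
	 * If the source and destination map to different indices in a
	 * virtually-indexed cache, write the source range back and
	 * invalidate it first, so no stale line survives at the old
	 * index once the mapping moves.
	 */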
#ifdef MIPS3_PLUS
	if (CPUISMIPS3 &&
	    (mips_cache_indexof(from) != mips_cache_indexof(to)))
		mips_dcache_wbinv_range((vaddr_t) from, size);
#endif
	invalid = (MIPS_HAS_R4K_MMU) ? MIPS3_PG_NV | MIPS3_PG_G : MIPS1_PG_NV;
	while (size > 0) {
		tpte->pt_entry = fpte->pt_entry;
		fpte->pt_entry = invalid;
		MIPS_TBIS((vaddr_t)from);
		MIPS_TBIS((vaddr_t)to);
		fpte++; tpte++;
		size -= PAGE_SIZE;
		from += PAGE_SIZE;
		to += PAGE_SIZE;
	}
}

/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
void
vmapbuf(bp, len)
	struct buf *bp;
	vsize_t len;
{
	vaddr_t faddr, taddr, off;
	paddr_t pa;
	struct proc *p;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
	p = bp->b_proc;
	bp->b_saveaddr = bp->b_data;
	faddr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
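	/*
	 * Allocate kernel VA for the mapping, preferring an address
	 * the pmap considers compatible with the user address;
	 * presumably this keeps the cache color of the two mappings
	 * consistent on virtually-indexed caches.
	 */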
	taddr = uvm_km_valloc_prefer_wait(phys_map, len,
	    trunc_page((vaddr_t)bp->b_data));
	bp->b_data = (caddr_t)(taddr + off);
	len = atop(len);
	while (len--) {
		if (pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map), faddr,
		    &pa) == FALSE)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), taddr, trunc_page(pa),
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
	}
	pmap_update(vm_map_pmap(phys_map));
}

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(bp, len)
	struct buf *bp;
	vsize_t len;
{
	vaddr_t addr, off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);
	pmap_remove(pmap_kernel(), addr, addr + len);
	pmap_update(pmap_kernel());
	uvm_km_free_wakeup(phys_map, addr, len);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}

/*
 * Map a (kernel) virtual address to a physical address.
 *
 * The MIPS processor has three distinct kernel address ranges:
 *
 * - kseg0 kernel "virtual address" for the   cached physical address space.
 * - kseg1 kernel "virtual address" for the uncached physical address space.
 * - kseg2 normal kernel "virtual address" mapped via the TLB.
 */
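/*
 * On a 32-bit MIPS CPU these ranges are fixed by the architecture:
 *
 *	kseg0	0x80000000 - 0x9fffffff		unmapped, cached
 *	kseg1	0xa0000000 - 0xbfffffff		unmapped, uncached
 *	kseg2	0xc0000000 and up		mapped through the TLB
 */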
paddr_t
kvtophys(kva)
	vaddr_t kva;
{
	pt_entry_t *pte;
	paddr_t phys;

	if (kva >= MIPS_KSEG2_START) {
		if (kva >= VM_MAX_KERNEL_ADDRESS)
			goto overrun;

		pte = kvtopte(kva);
		if ((pte - Sysmap) >= Sysmapsize) {
			printf("oops: Sysmap overrun, max %d index %d\n",
			       Sysmapsize, (int)(pte - Sysmap));
		}
		if (!mips_pg_v(pte->pt_entry)) {
			printf("kvtophys: pte not valid for %lx\n", kva);
		}
		phys = mips_tlbpfn_to_paddr(pte->pt_entry) | (kva & PGOFSET);
		return phys;
	}
	if (kva >= MIPS_KSEG1_START)
		return MIPS_KSEG1_TO_PHYS(kva);

	if (kva >= MIPS_KSEG0_START)
		return MIPS_KSEG0_TO_PHYS(kva);

overrun:
	printf("Virtual address %lx: cannot map to physical\n", kva);
#ifdef DDB
	Debugger();
	return 0;	/* XXX */
#endif
	panic("kvtophys");
}
390