/*	$NetBSD: vm_machdep.c,v 1.1 2002/10/20 02:37:43 chs Exp $	*/

/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vm_machdep.c 1.21 91/04/06$
 *
 *	@(#)vm_machdep.c	8.6 (Berkeley) 1/12/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.1 2002/10/20 02:37:43 chs Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/user.h>
#include <sys/core.h>
#include <sys/exec.h>

#include <machine/frame.h>
#include <machine/cpu.h>
#include <machine/pte.h>
#include <machine/reg.h>

#include <uvm/uvm_extern.h>

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb and trap frame, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * proc_trampoline() and call child_return() with p2 as an
 * argument. This causes the newly-created child process to go
 * directly to user level with an apparent return value of 0 from
 * fork(), while the parent process returns normally.
 *
 * p1 is the process being forked; if p1 == &proc0, we are creating
 * a kernel thread, and the return path and argument are specified with
 * `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 */
void
cpu_fork(p1, p2, stack, stacksize, func, arg)
	struct proc *p1, *p2;
	void *stack;
	size_t stacksize;
	void (*func) __P((void *));
	void *arg;
{
	struct pcb *pcb = &p2->p_addr->u_pcb;
	struct trapframe *tf;
	struct switchframe *sf;
	extern struct pcb *curpcb;

	p2->p_md.md_flags = p1->p_md.md_flags;

	/* Copy pcb from proc p1 to p2. */
	if (p1 == curproc) {
		/* Sync the PCB before we copy it. */
		savectx(curpcb);
	}
#ifdef DIAGNOSTIC
	else if (p1 != &proc0)
		panic("cpu_fork: curproc");
#endif
	*pcb = p1->p_addr->u_pcb;

	/*
	 * Copy the trap frame.
	 */
	tf = (struct trapframe *)((u_int)p2->p_addr + USPACE) - 1;
	p2->p_md.md_regs = (int *)tf;
	*tf = *(struct trapframe *)p1->p_md.md_regs;

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf->tf_regs[15] = (u_int)stack + stacksize;

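	/*
	 * Lay a switch frame just below the trap frame on the child's
	 * kernel stack: cpu_switch() restores the registers staged in
	 * the pcb below and then "returns" through sf_pc into
	 * proc_trampoline.
	 */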
	sf = (struct switchframe *)tf - 1;
	sf->sf_pc = (u_int)proc_trampoline;
	pcb->pcb_regs[6] = (int)func;		/* A2 */
	pcb->pcb_regs[7] = (int)arg;		/* A3 */
	pcb->pcb_regs[11] = (int)sf;		/* SSP */
}

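/*
 * Illustrative sketch, not part of the original file: a kernel-thread
 * creator would pass proc0 as the parent and supply the entry point and
 * argument directly.  `kthread_entry' and `kthread_arg' are hypothetical
 * placeholder names.
 */
#if 0
	cpu_fork(&proc0, p2, NULL, 0, kthread_entry, kthread_arg);
#endif
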
/*
 * cpu_exit is called as the last action during exit.
 *
 * Block context switches and then call switch_exit(), which
 * switches to another process and thus never returns.
 */
void
cpu_exit(p)
	struct proc *p;
{

	(void) splhigh();
	uvmexp.swtch++;
	switch_exit(p);
	/* NOTREACHED */
}

/*
 * Dump the machine-specific header information at the start of a core dump.
 */
struct md_core {
	struct reg intreg;
	struct fpreg freg;
};

int
cpu_coredump(p, vp, cred, chdr)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
	struct core *chdr;
{
	struct md_core md_core;
	struct coreseg cseg;
	int error;

	CORE_SETMAGIC(*chdr, COREMAGIC, MID_MACHINE, 0);
	chdr->c_hdrsize = ALIGN(sizeof(*chdr));
	chdr->c_seghdrsize = ALIGN(sizeof(cseg));
	chdr->c_cpusize = sizeof(md_core);

	/* Save integer registers. */
	error = process_read_regs(p, &md_core.intreg);
	if (error)
		return error;

	if (fputype) {
		/* Save floating point registers. */
		error = process_read_fpregs(p, &md_core.freg);
		if (error)
			return error;
	} else {
		/* Make sure these are clear. */
		memset((caddr_t)&md_core.freg, 0, sizeof(md_core.freg));
	}

	CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_MACHINE, CORE_CPU);
	cseg.c_addr = 0;
	cseg.c_size = chdr->c_cpusize;

	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&cseg, chdr->c_seghdrsize,
	    (off_t)chdr->c_hdrsize, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred,
	    NULL, p);
	if (error)
		return error;

	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&md_core, sizeof(md_core),
	    (off_t)(chdr->c_hdrsize + chdr->c_seghdrsize), UIO_SYSSPACE,
	    IO_NODELOCKED|IO_UNIT, cred, NULL, p);
	if (error)
		return error;

	chdr->c_nseg++;
	return 0;
}

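/*
 * Sketch, not in the original source: with the header fields set as
 * above, a core-file reader would find the pieces written by this
 * function at the following offsets.
 */
#if 0
	off_t seg_off = chdr->c_hdrsize;			/* struct coreseg */
	off_t data_off = chdr->c_hdrsize + chdr->c_seghdrsize;	/* struct md_core */
#endif
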
/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of PAGE_SIZE.
 */
void
pagemove(from, to, size)
	caddr_t from, to;
	size_t size;
{
	paddr_t pa;
	boolean_t rv;

#ifdef DEBUG
	if (size & PGOFSET)
		panic("pagemove");
#endif
	while (size > 0) {
		rv = pmap_extract(pmap_kernel(), (vaddr_t)from, &pa);
#ifdef DEBUG
		if (rv == FALSE)
			panic("pagemove 2");
		if (pmap_extract(pmap_kernel(), (vaddr_t)to, NULL) == TRUE)
			panic("pagemove 3");
#endif
		pmap_kremove((vaddr_t)from, PAGE_SIZE);
		pmap_kenter_pa((vaddr_t)to, pa, VM_PROT_READ | VM_PROT_WRITE);
		from += PAGE_SIZE;
		to += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
}

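/*
 * Hypothetical usage, not from the original file: relocate two
 * page-aligned kernel pages from `oldva' to `newva' (placeholder
 * names); the destination range must be currently unmapped.
 */
#if 0
	pagemove(oldva, newva, 2 * PAGE_SIZE);
#endif
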
/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
void
vmapbuf(bp, len)
	struct buf *bp;
	vsize_t len;
{
	struct pmap *upmap, *kpmap;
	vaddr_t uva;		/* User VA (map from) */
	vaddr_t kva;		/* Kernel VA (map to) */
	paddr_t pa;		/* physical address */
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	uva = m68k_trunc_page(bp->b_saveaddr = bp->b_data);
	off = (vaddr_t)bp->b_data - uva;
	len = m68k_round_page(off + len);
	kva = uvm_km_valloc_wait(phys_map, len);
	bp->b_data = (caddr_t)(kva + off);

	upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	kpmap = vm_map_pmap(phys_map);
	do {
		if (pmap_extract(upmap, uva, &pa) == FALSE)
			panic("vmapbuf: null page frame");
#ifdef M68K_VAC
		pmap_enter(kpmap, kva, pa, VM_PROT_READ | VM_PROT_WRITE,
		    PMAP_WIRED);
#else
		pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE);
#endif
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
		len -= PAGE_SIZE;
	} while (len);
	pmap_update(kpmap);
}

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(bp, len)
	struct buf *bp;
	vsize_t len;
{
	vaddr_t kva;
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	kva = m68k_trunc_page(bp->b_data);
	off = (vaddr_t)bp->b_data - kva;
	len = m68k_round_page(off + len);

#ifdef M68K_VAC
	pmap_remove(vm_map_pmap(phys_map), kva, kva + len);
#else
	pmap_kremove(kva, len);
#endif
	pmap_update(pmap_kernel());
	uvm_km_free_wakeup(phys_map, kva, len);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}

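/*
 * Illustrative pairing, not part of the original file: a physio-style
 * caller maps the user buffer around the transfer and unmaps it after.
 */
#if 0
	vmapbuf(bp, len);		/* user pages -> phys_map KVA */
	/* ... perform the I/O through bp->b_data ... */
	vunmapbuf(bp, len);		/* restore bp->b_data, free the KVA */
#endif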

#if defined(M68K_MMU_MOTOROLA) || defined(M68K_MMU_HP)

#include <m68k/cacheops.h>

/*
 * Map `size' bytes of physical memory starting at `paddr' into
 * kernel VA space at `vaddr'.  Read/write and cache-inhibit status
 * are specified by `prot'.
 */
void
physaccess(vaddr, paddr, size, prot)
	caddr_t vaddr, paddr;
	int size, prot;
{
	pt_entry_t *pte;
	u_int page;

	pte = kvtopte(vaddr);
	page = (u_int)paddr & PG_FRAME;
	for (size = btoc(size); size; size--) {
		*pte++ = PG_V | prot | page;
		page += NBPG;
	}
	TBIAS();
}
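
/*
 * Hypothetical usage, not from the original file: map one page of
 * device registers read/write and cache-inhibited.  `regva' and
 * `regpa' are placeholder names; PG_RW and PG_CI are assumed to come
 * from <machine/pte.h>.
 */
#if 0
	physaccess(regva, (caddr_t)regpa, NBPG, PG_RW | PG_CI);
#endif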
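/*
 * Undo a physaccess() mapping: invalidate the PTEs covering `size'
 * bytes starting at `vaddr' and flush the stale TLB entries.
 */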
void
physunaccess(vaddr, size)
	caddr_t vaddr;
	int size;
{
	pt_entry_t *pte;

	pte = kvtopte(vaddr);
	for (size = btoc(size); size; size--)
		*pte++ = PG_NV;
	TBIAS();
}

/*
 * Convert kernel VA to physical address
 */
int
kvtop(addr)
	caddr_t addr;
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), (vaddr_t)addr, &pa) == FALSE)
		panic("kvtop: zero page frame");
	return((int)pa);
}
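
/*
 * Hypothetical usage, not in the original file: fetch the physical
 * address backing a kernel buffer, e.g. to program a DMA engine.
 * `bufva' and `pa' are placeholder names.
 */
#if 0
	pa = kvtop(bufva);
#endif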

#endif