/*	$NetBSD: vm_machdep.c,v 1.77 2002/03/10 22:32:31 ragge Exp $	     */

/*
 * Copyright (c) 1994 Ludd, University of Luleå, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *     This product includes software developed at Ludd, University of Luleå.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_compat_ultrix.h"
#include "opt_cputype.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/exec.h>
#include <sys/vnode.h>
#include <sys/core.h>
#include <sys/mount.h>
#include <sys/device.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

#include <machine/vmparam.h>
#include <machine/mtpr.h>
#include <machine/pmap.h>
#include <machine/pte.h>
#include <machine/macros.h>
#include <machine/trap.h>
#include <machine/pcb.h>
#include <machine/frame.h>
#include <machine/cpu.h>
#include <machine/sid.h>
/*
 * pagemove - move the mappings for `size' bytes from virtual address
 * `from' to virtual address `to'.  Only the PTEs are moved (with bcopy);
 * no page contents are copied.
 */
void
pagemove(caddr_t from, caddr_t to, size_t size)
{
	pt_entry_t *fpte, *tpte;
	int	stor;

	fpte = kvtopte(from);
	tpte = kvtopte(to);

	/* Number of bytes of PTEs mapping the region. */
	stor = (size >> VAX_PGSHIFT) * sizeof(struct pte);
	bcopy(fpte, tpte, stor);
	bzero(fpte, stor);
	mtpr(0, PR_TBIA);	/* Invalidate the whole translation buffer */
}
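
/*
 * Example (sketch): the classic caller is the buffer cache, moving the
 * KVA mapping of a buffer without touching the data, along the lines of
 *
 *	pagemove(oldbuf, newbuf, round_page(size));
 *
 * where `oldbuf' and `newbuf' are hypothetical page-aligned kernel
 * addresses and only `oldbuf' is currently backed by mapped pages.
 */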

#ifdef MULTIPROCESSOR
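/*
 * MP fork trampoline: cpu_fork() below stashes the real entry point and
 * its argument in R0/R1 of the new pcb and points the saved PC here;
 * proc_trampoline_mp() does the MP-specific startup work before the
 * process body runs.
 */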
static void
procjmp(void *arg)
{
	struct pcb *pcb = arg;	/* passed as ca_arg1, see cpu_fork() */
	void (*func)(void *);

	func = (void *)pcb->R[0];
	arg = (void *)pcb->R[1];
	proc_trampoline_mp();
	(*func)(arg);
}
#endif

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb and trap frame, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * proc_trampoline() and call child_return() with p2 as an
 * argument.  This causes the newly-created child process to go
 * directly to user level with an apparent return value of 0 from
 * fork(), while the parent process returns normally.
 *
 * p1 is the process being forked; if p1 == &proc0, we are creating
 * a kernel thread, and the return path and argument are specified with
 * `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize arguments), set up the user stack
 * pointer accordingly.
 *
 * cpu_fork() copies the parent's trapframe and creates a fake CALLS
 * frame on top of it, so that it can safely call child_return().
 * We also take away the mapping for the fourth page after the pcb,
 * so that we get something like a "red zone" for the kernel stack.
 */
void
cpu_fork(struct proc *p1, struct proc *p2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb;
	struct trapframe *tf;
	struct callsframe *cf;
	extern int sret; /* Return address in trap routine */

#ifdef DIAGNOSTIC
	/*
	 * If p1 != curproc && p1 == &proc0, we're creating a kernel thread.
	 */
	if (p1 != curproc && p1 != &proc0)
		panic("cpu_fork: curproc");
#endif

	/*
	 * Copy the trap frame.
	 */
	tf = (struct trapframe *)((u_int)p2->p_addr + USPACE) - 1;
	p2->p_addr->u_pcb.framep = tf;
	bcopy(p1->p_addr->u_pcb.framep, tf, sizeof(*tf));

	/*
	 * Activate address space for the new process.  The PTEs have
	 * already been allocated by way of pmap_create().
	 * This writes the page table registers to the PCB.
	 */
	pmap_activate(p2);

	/* Mark guard page invalid in kernel stack */
	kvtopte((u_int)p2->p_addr + REDZONEADDR)->pg_v = 0;

	/*
	 * Set up the calls frame above (below) the trapframe and
	 * populate it, so that it looks as if the function given as
	 * argument had been entered with a CALLS instruction.
	 */
	cf = (struct callsframe *)tf - 1;
	cf->ca_cond = 0;
	cf->ca_maskpsw = 0x20000000;	/* CALLS stack frame, no registers */
	cf->ca_pc = (unsigned)&sret;	/* return PC; userspace trampoline */
	cf->ca_argno = 1;
	cf->ca_arg1 = (int)arg;
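
	/*
	 * Top of the new kernel stack at this point (sketch; the highest
	 * address is at the top):
	 *
	 *	p2->p_addr + USPACE ->	+---------------+
	 *				|   trapframe	| <- tf, pcb->framep
	 *				+---------------+
	 *				|   callsframe	| <- cf, soon KSP/FP
	 *				+---------------+
	 */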

	/*
	 * Set up internal defs in PCB.  This matches the "fake" CALLS
	 * frame that was constructed above.
	 */
	pcb = &p2->p_addr->u_pcb;
	pcb->iftrap = NULL;
	pcb->KSP = (long)cf;
	pcb->FP = (long)cf;
	pcb->AP = (long)&cf->ca_argno;
#ifdef MULTIPROCESSOR
	cf->ca_arg1 = (long)pcb;	/* procjmp() takes the pcb instead */
	pcb->PC = (long)procjmp + 2;	/* Skip save mask */
	pcb->R[0] = (int)func;
	pcb->R[1] = (int)arg;
#else
	pcb->PC = (int)func + 2;	/* Skip save mask */
#endif

	/*
	 * If specified, give the child a different user stack.
	 */
	if (stack != NULL)
		tf->sp = (u_long)stack + stacksize;

	/*
	 * Set the last return information after fork().
	 * This is only interesting if the child will return to userspace,
	 * but doesn't hurt otherwise.
	 */
	tf->r0 = p1->p_pid; /* parent pid (shouldn't be needed) */
	tf->r1 = 1;	    /* r1 != 0 marks the child of the fork */
	tf->psl = PSL_U|PSL_PREVU;
}
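
/*
 * For reference: kernel threads reach cpu_fork() through the generic fork
 * path with proc0 as p1 (e.g. via kthread_create1()), in which case
 * `func'/`arg' name the thread body rather than child_return().
 */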

int
cpu_exec_aout_makecmds(p, epp)
	struct proc *p;
	struct exec_package *epp;
{
	return ENOEXEC;		/* no machine-dependent a.out variants */
}

int
sys_sysarch(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{

	return (ENOSYS);
}

/*
 * Dump the machine-specific header information at the start of a core dump.
 * First put all registers in the PCB for debugging purposes.  This is not
 * a good way to do this, but good enough for my purposes so far.
 */
int
cpu_coredump(p, vp, cred, chdr)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
	struct core *chdr;
{
	struct trapframe *tf;
	struct md_coredump state;
	struct coreseg cseg;
	int error;

	tf = p->p_addr->u_pcb.framep;
	CORE_SETMAGIC(*chdr, COREMAGIC, MID_MACHINE, 0);
	chdr->c_hdrsize = sizeof(struct core);
	chdr->c_seghdrsize = sizeof(struct coreseg);
	chdr->c_cpusize = sizeof(struct md_coredump);

	bcopy(tf, &state, sizeof(struct md_coredump));

	CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_MACHINE, CORE_CPU);
	cseg.c_addr = 0;
	cseg.c_size = chdr->c_cpusize;
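
	/*
	 * Resulting layout (sketch): the CPU segment header and the saved
	 * register state are written right after the generic core header;
	 * c_addr is 0 since register contents have no user virtual address.
	 */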

	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&cseg, chdr->c_seghdrsize,
	    (off_t)chdr->c_hdrsize, UIO_SYSSPACE,
	    IO_NODELOCKED|IO_UNIT, cred, NULL, p);
	if (error)
		return error;

	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&state, sizeof(state),
	    (off_t)(chdr->c_hdrsize + chdr->c_seghdrsize), UIO_SYSSPACE,
	    IO_NODELOCKED|IO_UNIT, cred, NULL, p);

	if (!error)
		chdr->c_nseg++;

	return error;
}

/*
 * Map in a bunch of pages read/writable for the kernel.
 */
void
ioaccess(vaddr, paddr, npgs)
	vaddr_t vaddr;
	paddr_t paddr;
	int npgs;
{
	u_int *pte = (u_int *)kvtopte(vaddr);
	int i;

	/* Valid, kernel-writable PTEs pointing at consecutive page frames */
	for (i = 0; i < npgs; i++)
		pte[i] = PG_V | PG_KW | (PG_PFNUM(paddr) + i);
}
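
/*
 * Example (sketch, with a made-up physical address): to reach a device
 * whose registers live at physical address 0x2007e000, a driver could do
 *
 *	ioaccess(va, 0x2007e000, 1);
 *	regs = (volatile struct dev_regs *)va;
 *
 * where `va' is a page of kernel virtual address space set aside
 * beforehand, undone later with iounaccess(va, 1).
 */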

/*
 * Opposite to the above: just forget their mapping.
 */
void
iounaccess(vaddr, npgs)
	vaddr_t vaddr;
	int npgs;
{
	u_int *pte = (u_int *)kvtopte(vaddr);
	int i;

	for (i = 0; i < npgs; i++)
		pte[i] = 0;
	mtpr(0, PR_TBIA);	/* Flush the now-stale translations */
}

/*
 * Map a user I/O request into kernel virtual address space; used by
 * physio() for raw I/O straight to or from a user buffer.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
void
vmapbuf(bp, len)
	struct buf *bp;
	vsize_t len;
{
#if VAX46 || VAX48 || VAX49 || VAX53 || VAXANY
	vaddr_t faddr, taddr, off;
	paddr_t pa;
	struct proc *p;

	if (vax_boardtype != VAX_BTYP_46
	    && vax_boardtype != VAX_BTYP_48
	    && vax_boardtype != VAX_BTYP_49
	    && vax_boardtype != VAX_BTYP_53)
		return;		/* Only these CPU types need the remap */
	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
	p = bp->b_proc;
	faddr = trunc_page((vaddr_t)(bp->b_saveaddr = bp->b_data));
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	taddr = uvm_km_valloc_wait(phys_map, len);	/* KVA to map into */
	bp->b_data = (caddr_t)(taddr + off);
	len = atop(len);
	while (len--) {
		if (pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map), faddr,
		    &pa) == FALSE)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), taddr, trunc_page(pa),
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
	}
	pmap_update(vm_map_pmap(phys_map));
#endif
}

/*
 * Unmap a previously mapped user I/O request.
 */
void
vunmapbuf(bp, len)
	struct buf *bp;
	vsize_t len;
{
#if VAX46 || VAX48 || VAX49 || VAX53 || VAXANY
	vaddr_t addr, off;

	if (vax_boardtype != VAX_BTYP_46
	    && vax_boardtype != VAX_BTYP_48
	    && vax_boardtype != VAX_BTYP_49
	    && vax_boardtype != VAX_BTYP_53)
		return;
	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);
	pmap_remove(vm_map_pmap(phys_map), addr, addr + len);
	pmap_update(vm_map_pmap(phys_map));
	uvm_km_free_wakeup(phys_map, addr, len);
	bp->b_data = bp->b_saveaddr;	/* Restore the original user address */
	bp->b_saveaddr = NULL;
#endif
}
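
/*
 * Sketch of how the pair above is used by a physio()-style caller
 * (order only; error handling elided):
 *
 *	uvm_vslock(p, bp->b_data, todo, prot);	wire the user pages
 *	vmapbuf(bp, todo);			enter the kernel mapping
 *	(*strategy)(bp); biowait(bp);		do the I/O
 *	vunmapbuf(bp, todo);			tear the mapping down
 *	uvm_vsunlock(p, bp->b_data, todo);	unwire the user pages
 */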