/* $NetBSD: vm_machdep.c,v 1.76 2002/02/26 15:13:28 simonb Exp $ */

/*
 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.76 2002/02/26 15:13:28 simonb Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/user.h>
#include <sys/core.h>
#include <sys/exec.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/alpha.h>
#include <machine/pmap.h>
#include <machine/reg.h>

/*
 * Dump the machine specific header information at the start of a core dump.
 */
int
cpu_coredump(struct proc *p, struct vnode *vp, struct ucred *cred,
    struct core *chdr)
{
	int error;
	struct md_coredump cpustate;
	struct coreseg cseg;

	CORE_SETMAGIC(*chdr, COREMAGIC, MID_MACHINE, 0);
	chdr->c_hdrsize = ALIGN(sizeof(*chdr));
	chdr->c_seghdrsize = ALIGN(sizeof(cseg));
	chdr->c_cpusize = sizeof(cpustate);

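	/*
	 * Snapshot the machine-dependent CPU state: the trap frame
	 * (with the user stack pointer read back from PALcode), plus
	 * the FP register state if this process ever used the FPU.
	 */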
	cpustate.md_tf = *p->p_md.md_tf;
	cpustate.md_tf.tf_regs[FRAME_SP] = alpha_pal_rdusp();	/* XXX */
	if (p->p_md.md_flags & MDP_FPUSED) {
		if (p->p_addr->u_pcb.pcb_fpcpu != NULL)
			fpusave_proc(p, 1);
		cpustate.md_fpstate = p->p_addr->u_pcb.pcb_fp;
	} else
		memset(&cpustate.md_fpstate, 0, sizeof(cpustate.md_fpstate));

	CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_MACHINE, CORE_CPU);
	cseg.c_addr = 0;
	cseg.c_size = chdr->c_cpusize;

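	/*
	 * Write the CPU segment header just past the core header,
	 * then the register state itself immediately after that.
	 */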
	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&cseg, chdr->c_seghdrsize,
	    (off_t)chdr->c_hdrsize, UIO_SYSSPACE,
	    IO_NODELOCKED|IO_UNIT, cred, NULL, p);
	if (error)
		return error;

	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&cpustate, sizeof(cpustate),
	    (off_t)(chdr->c_hdrsize + chdr->c_seghdrsize), UIO_SYSSPACE,
	    IO_NODELOCKED|IO_UNIT, cred, NULL, p);

	if (!error)
		chdr->c_nseg++;

	return error;
}

/*
 * cpu_exit is called as the last action during exit.
 * We block interrupts and call switch_exit.  switch_exit switches
 * to proc0's PCB and stack, then jumps into the middle of cpu_switch,
 * as if it were switching from proc0.
 */
void
cpu_exit(struct proc *p)
{

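	/*
	 * Toss (rather than save) any FP state still resident on a
	 * CPU; the process is exiting and will not use it again.
	 */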
	if (p->p_addr->u_pcb.pcb_fpcpu != NULL)
		fpusave_proc(p, 0);

	/*
	 * Deactivate the exiting address space before the vmspace
	 * is freed.  Note that we will continue to run on this
	 * vmspace's context until the switch to proc0 in switch_exit().
	 */
	pmap_deactivate(p);

	(void) splhigh();
	switch_exit(p);
	/* NOTREACHED */
}

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb and trap frame, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * proc_trampoline() and call child_return() with p2 as an
 * argument. This causes the newly-created child process to go
 * directly to user level with an apparent return value of 0 from
 * fork(), while the parent process returns normally.
 *
 * p1 is the process being forked; if p1 == &proc0, we are creating
 * a kernel thread, and the return path and argument are specified with
 * `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 */
void
cpu_fork(struct proc *p1, struct proc *p2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct user *up = p2->p_addr;

	p2->p_md.md_tf = p1->p_md.md_tf;

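	/*
	 * The child inherits the parent's FPU-used flag and software
	 * FP control word (FP_C) bits; all other MD flags start clear.
	 */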
	p2->p_md.md_flags = p1->p_md.md_flags & (MDP_FPUSED | MDP_FP_C);

	/*
	 * Cache the physical address of the pcb, so we can
	 * swap to it easily.
	 */
	p2->p_md.md_pcbpaddr = (void *)vtophys((vaddr_t)&up->u_pcb);

	/*
	 * Copy floating point state from the FP chip to the PCB
	 * if this process has state stored there.
	 */
	if (p1->p_addr->u_pcb.pcb_fpcpu != NULL)
		fpusave_proc(p1, 1);

	/*
	 * Copy pcb and user stack pointer from proc p1 to p2.
	 * If specified, give the child a different stack.
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
	if (stack != NULL)
		p2->p_addr->u_pcb.pcb_hw.apcb_usp = (u_long)stack + stacksize;
	else
		p2->p_addr->u_pcb.pcb_hw.apcb_usp = alpha_pal_rdusp();
	simple_lock_init(&p2->p_addr->u_pcb.pcb_fpcpu_slock);

	/*
	 * Arrange for a non-local goto when the new process is
	 * started: cpu_switch() will restore the context saved below
	 * and "return" into proc_trampoline().
	 */
#ifdef DIAGNOSTIC
	/*
	 * If p1 != curproc && p1 == &proc0, we are creating a kernel
	 * thread.
	 */
	if (p1 != curproc && p1 != &proc0)
		panic("cpu_fork: curproc");
#endif

	/*
	 * create the child's kernel stack, from scratch.
	 */
	{
		struct trapframe *p2tf;

		/*
		 * Pick a stack pointer, leaving room for a trapframe;
		 * copy trapframe from parent so return to user mode
		 * will be to right address, with correct registers.
		 */
		p2tf = p2->p_md.md_tf = (struct trapframe *)
		    ((char *)p2->p_addr + USPACE - sizeof(struct trapframe));
		memcpy(p2->p_md.md_tf, p1->p_md.md_tf,
		    sizeof(struct trapframe));

		/*
		 * Set up return-value registers as fork() libc stub expects.
		 */
		p2tf->tf_regs[FRAME_V0] = p1->p_pid;	/* parent's pid */
		p2tf->tf_regs[FRAME_A3] = 0;		/* no error */
		p2tf->tf_regs[FRAME_A4] = 1;		/* is child */

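		/*
		 * Prime the saved switch context: cpu_switch() restores
		 * s0-s6, ra and ps from pcb_context[], so switching to
		 * the child "returns" into proc_trampoline(), which
		 * jumps to func(arg) via s0/s2 and leaves s1 as the
		 * return path into exception_return().
		 */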
		up->u_pcb.pcb_hw.apcb_ksp = (u_int64_t)p2tf;
		up->u_pcb.pcb_context[0] =
		    (u_int64_t)func;			/* s0: pc */
		up->u_pcb.pcb_context[1] =
		    (u_int64_t)exception_return;	/* s1: ra */
		up->u_pcb.pcb_context[2] =
		    (u_int64_t)arg;			/* s2: arg */
		up->u_pcb.pcb_context[7] =
		    (u_int64_t)proc_trampoline;		/* ra: assembly magic */
		up->u_pcb.pcb_context[8] = ALPHA_PSL_IPL_0; /* ps: IPL */
	}
}

/*
 * Finish a swapin operation.
 *
 * We need to cache the physical address of the PCB, so we can
 * swap context to it easily.
 */
void
cpu_swapin(struct proc *p)
{
	struct user *up = p->p_addr;

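	/*
	 * The u-area may have been paged back in at a different
	 * physical address, so recompute the cached PCB address.
	 */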
	p->p_md.md_pcbpaddr = (void *)vtophys((vaddr_t)&up->u_pcb);
}

/*
 * cpu_swapout is called immediately before a process's 'struct user'
 * and kernel stack are unwired (which are in turn done immediately
 * before its P_INMEM flag is cleared).  If the process is the
 * current owner of the floating point unit, the FP state has to be
 * saved, so that it goes out with the pcb, which is in the user area.
 */
void
cpu_swapout(struct proc *p)
{

	if (p->p_addr->u_pcb.pcb_fpcpu != NULL)
		fpusave_proc(p, 1);
}

/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to have valid page table pages,
 * and size must be a multiple of NBPG.
 *
 * Note that since all kernel page table pages are pre-allocated
 * and mapped in, we can use the Virtual Page Table.
 */
void
pagemove(caddr_t from, caddr_t to, size_t size)
{
	long fidx, tidx;
	ssize_t todo;
	PMAP_TLB_SHOOTDOWN_CPUSET_DECL

	if (size % NBPG)
		panic("pagemove");

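	/*
	 * Move each mapping by copying its PTE within the Virtual
	 * Page Table: VPT is a linear array of the kernel's PTEs, so
	 * VPT_INDEX(va) names the PTE for va directly.  Invalidate
	 * both the old and new translations, locally and (via
	 * shootdown) on other CPUs.
	 */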
	todo = size;			/* if testing > 0, need sign... */
	while (todo > 0) {
		fidx = VPT_INDEX(from);
		tidx = VPT_INDEX(to);

		VPT[tidx] = VPT[fidx];
		VPT[fidx] = 0;

		ALPHA_TBIS((vaddr_t)from);
		ALPHA_TBIS((vaddr_t)to);

		PMAP_TLB_SHOOTDOWN(pmap_kernel(), (vaddr_t)from, PG_ASM);
		PMAP_TLB_SHOOTDOWN(pmap_kernel(), (vaddr_t)to, PG_ASM);

		todo -= NBPG;
		from += NBPG;
		to += NBPG;
	}

	PMAP_TLB_SHOOTNOW();
}

/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
void
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t pa;
	struct proc *p;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
	p = bp->b_proc;
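	/*
	 * Stash the original user address in b_saveaddr (vunmapbuf()
	 * restores it), then allocate a page-aligned range in
	 * phys_map and point b_data at the kernel alias of the
	 * buffer, preserving the page offset.
	 */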
	faddr = trunc_page((vaddr_t)(bp->b_saveaddr = bp->b_data));
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	taddr = uvm_km_valloc_wait(phys_map, len);
	bp->b_data = (caddr_t)(taddr + off);
	len = atop(len);
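	/*
	 * For each page, look up its physical address in the user's
	 * pmap and enter a wired mapping at the kernel alias.
	 */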
	while (len--) {
		if (pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map), faddr,
		    &pa) == FALSE)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), taddr, trunc_page(pa),
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
	}
	pmap_update(vm_map_pmap(phys_map));
}

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t addr, off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
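	/*
	 * Tear down the kernel alias set up by vmapbuf() and restore
	 * the buffer's original user address from b_saveaddr.
	 */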
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);
	pmap_remove(vm_map_pmap(phys_map), addr, addr + len);
	pmap_update(vm_map_pmap(phys_map));
	uvm_km_free_wakeup(phys_map, addr, len);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}