/*	$OpenBSD: vm_machdep.c,v 1.80 2014/12/16 18:30:03 tedu Exp $	*/

/*
 * Copyright (c) 1999-2004 Michael Shalayeff
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/user.h>
#include <sys/ptrace.h>
#include <sys/exec.h>
#include <sys/core.h>
#include <sys/pool.h>

#include <uvm/uvm_extern.h>

#include <machine/cpufunc.h>
#include <machine/fpu.h>
#include <machine/pmap.h>
#include <machine/pcb.h>

extern struct pool hppa_fppl;

/*
 * Dump the machine specific header information at the start of a core dump.
 */
int
cpu_coredump(struct proc *p, struct vnode *vp, struct ucred *cred,
    struct core *core)
{
	struct md_coredump md_core;
	struct coreseg cseg;
	off_t off;
	int error;

	CORE_SETMAGIC(*core, COREMAGIC, MID_HPPA, 0);
	core->c_hdrsize = ALIGN(sizeof(*core));
	core->c_seghdrsize = ALIGN(sizeof(cseg));
	core->c_cpusize = sizeof(md_core);

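	/* Snapshot the general and floating point register state. */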
	process_read_regs(p, &md_core.md_reg);
	process_read_fpregs(p, &md_core.md_fpreg);

	CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_HPPA, CORE_CPU);
	cseg.c_addr = 0;
	cseg.c_size = core->c_cpusize;

#define	write(vp, addr, n) \
	vn_rdwr(UIO_WRITE, (vp), (caddr_t)(addr), (n), off, \
	    UIO_SYSSPACE, IO_UNIT, cred, NULL, p)

	off = core->c_hdrsize;
	if ((error = write(vp, &cseg, core->c_seghdrsize)))
		return error;
	off += core->c_seghdrsize;
	if ((error = write(vp, &md_core, sizeof md_core)))
		return error;

#undef write
	core->c_nseg++;

	return error;
}

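/*
 * Finish a fork: duplicate the parent's pcb and trapframe into the child,
 * give the child its own FPU save area and space registers, and build the
 * kernel stack frames so that cpu_switchto() will start the child in
 * func(arg) via switch_trampoline.
 */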
void
cpu_fork(struct proc *p1, struct proc *p2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcbp;
	struct trapframe *tf;
	register_t sp, osp;

#ifdef DIAGNOSTIC
	if (round_page(sizeof(struct user)) > NBPG)
		panic("USPACE too small for user");
#endif
	fpu_proc_save(p1);

	pcbp = &p2->p_addr->u_pcb;
	bcopy(&p1->p_addr->u_pcb, pcbp, sizeof(*pcbp));
	/* space is cached for the copy{in,out}'s pleasure */
	pcbp->pcb_space = p2->p_vmspace->vm_map.pmap->pm_space;
	pcbp->pcb_fpstate = pool_get(&hppa_fppl, PR_WAITOK);
	*pcbp->pcb_fpstate = *p1->p_addr->u_pcb.pcb_fpstate;
	/* reset any of the pending FPU exceptions from parent */
	pcbp->pcb_fpstate->hfp_regs.fpr_regs[0] =
	    HPPA_FPU_FORK(pcbp->pcb_fpstate->hfp_regs.fpr_regs[0]);
	pcbp->pcb_fpstate->hfp_regs.fpr_regs[1] = 0;
	pcbp->pcb_fpstate->hfp_regs.fpr_regs[2] = 0;
	pcbp->pcb_fpstate->hfp_regs.fpr_regs[3] = 0;

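	/* Inherit the ptrace single-step/breakpoint state from the parent. */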
	p2->p_md.md_bpva = p1->p_md.md_bpva;
	p2->p_md.md_bpsave[0] = p1->p_md.md_bpsave[0];
	p2->p_md.md_bpsave[1] = p1->p_md.md_bpsave[1];

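	/*
	 * The child's trapframe sits one page into its USPACE; the kernel
	 * stack grows upward from just above it (PA-RISC stacks grow toward
	 * higher addresses).
	 */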
	sp = (register_t)p2->p_addr + NBPG;
	p2->p_md.md_regs = tf = (struct trapframe *)sp;
	sp += sizeof(struct trapframe);
	bcopy(p1->p_md.md_regs, tf, sizeof(*tf));

	tf->tf_cr30 = (paddr_t)pcbp->pcb_fpstate;

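	/*
	 * Run the child in its own address space: load its space id into
	 * the user space registers and derive the matching protection ids.
	 */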
	tf->tf_sr0 = tf->tf_sr1 = tf->tf_sr2 = tf->tf_sr3 =
	tf->tf_sr4 = tf->tf_sr5 = tf->tf_sr6 =
	tf->tf_iisq_head = tf->tf_iisq_tail =
		p2->p_vmspace->vm_map.pmap->pm_space;
	tf->tf_pidr1 = tf->tf_pidr2 = pmap_sid2pid(tf->tf_sr0);

	/*
	 * These could in theory be inherited from the parent, but set
	 * them explicitly just in case.
	 */
	tf->tf_sr7 = HPPA_SID_KERNEL;
	mfctl(CR_EIEM, tf->tf_eiem);
	tf->tf_ipsw = PSL_C | PSL_Q | PSL_P | PSL_D | PSL_I /* | PSL_L */ |
	    (curcpu()->ci_psw & PSL_O);

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		setstack(tf, (u_long)stack, 0);	/* XXX ignore error? */

	/*
	 * Build stack frames for the cpu_switchto & co.
	 */
	osp = sp + HPPA_FRAME_SIZE;
	*(register_t*)(osp - HPPA_FRAME_SIZE) = 0;
	*(register_t*)(osp + HPPA_FRAME_CRP) = (register_t)&switch_trampoline;
	*(register_t*)(osp) = (osp - HPPA_FRAME_SIZE);

	sp = osp + HPPA_FRAME_SIZE + 20*4; /* frame + callee-saved registers */
	*HPPA_FRAME_CARG(0, sp) = (register_t)arg;
	*HPPA_FRAME_CARG(1, sp) = KERNMODE(func);
	pcbp->pcb_ksp = sp;
}

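/*
 * Release the machine-dependent resources of an exiting process and
 * switch away from it for the last time.
 */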
void
cpu_exit(struct proc *p)
{
	struct pcb *pcb = &p->p_addr->u_pcb;

	fpu_proc_flush(p);

	pool_put(&hppa_fppl, pcb->pcb_fpstate);

	pmap_deactivate(p);
	sched_exit(p);
}

/*
 * Map an IO request into kernel virtual address space.
 */
void
vmapbuf(struct buf *bp, vsize_t len)
{
	struct pmap *pm = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	vaddr_t kva, uva;
	vsize_t size, off;

#ifdef DIAGNOSTIC
	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
#endif
	bp->b_saveaddr = bp->b_data;
	uva = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - uva;
	size = round_page(off + len);

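	/*
	 * Grab kernel va for the mapping, preferring an address that
	 * aliases the user va (hppa caches are virtually indexed), and
	 * enter each underlying physical page.
	 */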
	kva = uvm_km_valloc_prefer_wait(phys_map, size, uva);
	bp->b_data = (caddr_t)(kva + off);
	while (size > 0) {
		paddr_t pa;

		if (pmap_extract(pm, uva, &pa) == FALSE)
			panic("vmapbuf: null page frame");
		else
			pmap_kenter_pa(kva, pa, PROT_READ | PROT_WRITE);
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
}

/*
 * Unmap IO request from the kernel virtual address space.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t addr, off;

#ifdef DIAGNOSTIC
	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
#endif
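	/*
	 * Tear down the temporary mappings, free the kernel va and restore
	 * the original buffer address.
	 */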
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);
	pmap_kremove(addr, len);
	pmap_update(pmap_kernel());
	uvm_km_free_wakeup(phys_map, addr, len);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}
235