/*	$NetBSD: sig_machdep.c,v 1.52 2021/02/01 19:31:34 skrll Exp $	*/

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Machine dependent functions for kernel setup
 *
 * Created      : 17/09/94
 */

#include "opt_armfpe.h"

#include <sys/param.h>

__KERNEL_RCSID(0, "$NetBSD: sig_machdep.c,v 1.52 2021/02/01 19:31:34 skrll Exp $");

#include <sys/mount.h>		/* XXX only needed by syscallargs.h */
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/signal.h>
#include <sys/syscallargs.h>
#include <sys/systm.h>
#include <sys/ras.h>
#include <sys/ucontext.h>

#include <arm/locore.h>

#include <machine/pcb.h>
#include <arm/cpufunc.h>

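/*
 * Decide where the signal frame should go: on the alternate signal
 * stack if one is enabled and this signal requests it, otherwise on
 * the interrupted user stack.  *onstack reports which was chosen.
 */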
void *
getframe(struct lwp *l, int sig, int *onstack)
{
	struct proc * const p = l->l_proc;
	struct trapframe * const tf = lwp_trapframe(l);

	/* Do we need to jump onto the signal stack? */
	*onstack = (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0
	    && (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;
	if (*onstack)
		return (char *)l->l_sigstk.ss_sp + l->l_sigstk.ss_size;
	return (void *)tf->tf_usr_sp;
}


/*
 * Send a signal to the process.
 *
 * The user stack and trap frame are set up so that the signal handler
 * is invoked directly; when it returns, control passes through the
 * signal trampoline, which calls sigreturn to restore the signal mask,
 * the stack, and the interrupted context before resuming at the saved pc.
 */
void
sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct lwp * const l = curlwp;
	struct proc * const p = l->l_proc;
	struct sigacts * const ps = p->p_sigacts;
	struct trapframe * const tf = lwp_trapframe(l);
	struct sigframe_siginfo *fp, frame;
	int onstack, error;
	int sig = ksi->ksi_signo;
	sig_t catcher = SIGACTION(p, sig).sa_handler;

	fp = getframe(l, sig, &onstack);

	/* make room on the stack */
	fp--;

	/* make the stack aligned */
	fp = (struct sigframe_siginfo *)STACK_ALIGN(fp, STACK_ALIGNBYTES);

	/* populate the siginfo frame */
	memset(&frame, 0, sizeof(frame));
	frame.sf_si._info = ksi->ksi_info;
	frame.sf_uc.uc_flags = _UC_SIGMASK;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_link = l->l_ctxlink;
	frame.sf_uc.uc_flags |= (l->l_sigstk.ss_flags & SS_ONSTACK)
	    ? _UC_SETSTACK : _UC_CLRSTACK;
	sendsig_reset(l, sig);

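	/*
	 * Drop p_lock while building the context and copying the frame
	 * out: copyout() may fault on the user stack and sleep, which is
	 * not permitted while the lock is held.
	 */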
	mutex_exit(p->p_lock);
	cpu_getmcontext(l, &frame.sf_uc.uc_mcontext, &frame.sf_uc.uc_flags);
	error = copyout(&frame, fp, sizeof(frame));
	mutex_enter(p->p_lock);

	if (error != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

	/*
	 * Build context to run handler in.  We invoke the handler
	 * directly, only returning via the trampoline.  Note the
	 * trampoline version numbers are coordinated with machine-
	 * dependent code in libc.
	 */

	tf->tf_r0 = sig;
	tf->tf_r1 = (int)&fp->sf_si;
	tf->tf_r2 = (int)&fp->sf_uc;

	/* the trampoline uses r5 as the uc address */
	tf->tf_r5 = (int)&fp->sf_uc;
	tf->tf_pc = (int)catcher;
#ifdef THUMB_CODE
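	/*
	 * Bit 0 of the handler address set means the handler is Thumb
	 * code; set or clear the Thumb bit in the saved PSR to match.
	 */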
	if (((int) catcher) & 1)
		tf->tf_spsr |= PSR_T_bit;
	else
		tf->tf_spsr &= ~PSR_T_bit;
#endif
	tf->tf_usr_sp = (int)fp;
	tf->tf_usr_lr = (int)ps->sa_sigdesc[sig].sd_tramp;

	/* Remember that we're now on the signal stack. */
	if (onstack)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
}

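/*
 * Capture the LWP's user register state, and its VFP state and TLS base
 * where present, into an mcontext_t.
 */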
void
cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
{
	struct trapframe * const tf = lwp_trapframe(l);
	__greg_t * const gr = mcp->__gregs;
	__greg_t ras_pc;

	/* Save General Register context. */
	gr[_REG_R0]   = tf->tf_r0;
	gr[_REG_R1]   = tf->tf_r1;
	gr[_REG_R2]   = tf->tf_r2;
	gr[_REG_R3]   = tf->tf_r3;
	gr[_REG_R4]   = tf->tf_r4;
	gr[_REG_R5]   = tf->tf_r5;
	gr[_REG_R6]   = tf->tf_r6;
	gr[_REG_R7]   = tf->tf_r7;
	gr[_REG_R8]   = tf->tf_r8;
	gr[_REG_R9]   = tf->tf_r9;
	gr[_REG_R10]  = tf->tf_r10;
	gr[_REG_R11]  = tf->tf_r11;
	gr[_REG_R12]  = tf->tf_r12;
	gr[_REG_SP]   = tf->tf_usr_sp;
	gr[_REG_LR]   = tf->tf_usr_lr;
	gr[_REG_PC]   = tf->tf_pc;
	gr[_REG_CPSR] = tf->tf_spsr;

	KASSERTMSG(VALID_PSR(gr[_REG_CPSR]), "%#x", gr[_REG_CPSR]);

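	/*
	 * If the PC lies within a registered restartable atomic sequence,
	 * report the sequence's restart address instead so that the
	 * sequence is re-run from the start.
	 */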
	if ((ras_pc = (__greg_t)ras_lookup(l->l_proc,
	    (void *) gr[_REG_PC])) != -1)
		gr[_REG_PC] = ras_pc;

	*flags |= _UC_CPU;

#ifdef FPU_VFP
	vfp_getcontext(l, mcp, flags);
#endif

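	/* Record the LWP's TLS base pointer in the context as well. */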
	mcp->_mc_tlsbase = (uintptr_t)l->l_private;
	*flags |= _UC_TLSBASE;

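	/*
	 * Also capture the saved value of the user read/write thread ID
	 * register (TPIDRURW), which is kept in the PCB.
	 */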
	const struct pcb * const pcb = lwp_getpcb(l);
	mcp->_mc_user_tpid = pcb->pcb_user_pid_rw;
}

int
cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp)
{
	const __greg_t * const gr = mcp->__gregs;

	/* Make sure the processor mode has not been tampered with. */
	if (!VALID_PSR(gr[_REG_CPSR]))
		return EINVAL;
	return 0;
}

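/*
 * Install register state from an mcontext_t into the LWP, honouring the
 * flags that say which parts (CPU, FPU, TLS base) are supplied.
 */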
int
cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
{
	struct trapframe * const tf = lwp_trapframe(l);
	const __greg_t * const gr = mcp->__gregs;
	struct proc * const p = l->l_proc;
	int error;

#ifdef FPU_VFP
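	/*
	 * Refuse an FPU context if this CPU has no VFP unit or if the
	 * supplied context does not use the VFP register layout.
	 */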
	if ((flags & _UC_FPU)
	    && (curcpu()->ci_vfp_id == 0 || (flags & _UC_ARM_VFP) == 0))
		return EINVAL;
#endif

	if ((flags & _UC_CPU) != 0) {
		/* Restore General Register context. */
		error = cpu_mcontext_validate(l, mcp);
		if (error)
			return error;

		tf->tf_r0     = gr[_REG_R0];
		tf->tf_r1     = gr[_REG_R1];
		tf->tf_r2     = gr[_REG_R2];
		tf->tf_r3     = gr[_REG_R3];
		tf->tf_r4     = gr[_REG_R4];
		tf->tf_r5     = gr[_REG_R5];
		tf->tf_r6     = gr[_REG_R6];
		tf->tf_r7     = gr[_REG_R7];
		tf->tf_r8     = gr[_REG_R8];
		tf->tf_r9     = gr[_REG_R9];
		tf->tf_r10    = gr[_REG_R10];
		tf->tf_r11    = gr[_REG_R11];
		tf->tf_r12    = gr[_REG_R12];
		tf->tf_usr_sp = gr[_REG_SP];
		tf->tf_usr_lr = gr[_REG_LR];
		tf->tf_pc     = gr[_REG_PC];
		tf->tf_spsr   = gr[_REG_CPSR];
	}

#ifdef FPU_VFP
	if ((flags & _UC_FPU) != 0) {
		/* Restore Floating Point Register context. */
		vfp_setcontext(l, mcp);
	}
#endif

	if ((flags & _UC_TLSBASE) != 0)
		lwp_setprivate(l, (void *)(uintptr_t)mcp->_mc_tlsbase);

	mutex_enter(p->p_lock);
	if (flags & _UC_SETSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if (flags & _UC_CLRSTACK)
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	mutex_exit(p->p_lock);

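	/* Update the PCB copy of the user read/write thread ID register. */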
	struct pcb * const pcb = lwp_getpcb(l);
	pcb->pcb_user_pid_rw = mcp->_mc_user_tpid;

	return (0);
}