/*	$NetBSD: compat_13_machdep.c,v 1.25 2009/11/21 03:11:00 rmind Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: compat_13_machdep.c,v 1.25 2009/11/21 03:11:00 rmind Exp $");

#ifdef _KERNEL_OPT
#include "opt_vm86.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <compat/sys/signal.h>
#include <compat/sys/signalvar.h>

#ifdef VM86
#include <machine/vm86.h>
#endif

int
compat_13_sys_sigreturn(struct lwp *l, const struct compat_13_sys_sigreturn_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct sigcontext13 *) sigcntxp;
	} */
	struct proc *p = l->l_proc;
	struct sigcontext13 *scp, context;
	struct trapframe *tf;
	sigset_t mask;

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	scp = SCARG(uap, sigcntxp);
	if (copyin((void *)scp, &context, sizeof(*scp)) != 0)
		return (EFAULT);

	/* Restore register context. */
	tf = l->l_md.md_regs;
#ifdef VM86
	if (context.sc_eflags & PSL_VM) {
		void syscall_vm86(struct trapframe *);

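		/*
		 * vm86 mode: the saved segment values go into the vm86
		 * extension of the trap frame, set_vflags() installs the
		 * saved EFLAGS, and subsequent system calls from vm86 mode
		 * are dispatched via syscall_vm86().
		 */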
		tf->tf_vm86_gs = context.sc_gs;
		tf->tf_vm86_fs = context.sc_fs;
		tf->tf_vm86_es = context.sc_es;
		tf->tf_vm86_ds = context.sc_ds;
		set_vflags(l, context.sc_eflags);
		p->p_md.md_syscall = syscall_vm86;
	} else
#endif
	{
		/*
		 * Check for security violations.  If we're returning to
		 * protected mode, the CPU will validate the segment registers
		 * automatically and generate a trap on violations.  We handle
		 * the trap, rather than doing all of the checking here.
		 */
		if (((context.sc_eflags ^ tf->tf_eflags) & PSL_USERSTATIC) != 0 ||
		    !USERMODE(context.sc_cs, context.sc_eflags))
			return (EINVAL);

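		/*
		 * Load the segment registers from the saved context and
		 * merge in only the user-modifiable EFLAGS bits.
		 */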
		tf->tf_gs = context.sc_gs;
		tf->tf_fs = context.sc_fs;
		tf->tf_es = context.sc_es;
		tf->tf_ds = context.sc_ds;
		tf->tf_eflags &= ~PSL_USER;
		tf->tf_eflags |= context.sc_eflags & PSL_USER;
	}
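	/* Restore the general-purpose registers, PC and user stack. */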
	tf->tf_edi = context.sc_edi;
	tf->tf_esi = context.sc_esi;
	tf->tf_ebp = context.sc_ebp;
	tf->tf_ebx = context.sc_ebx;
	tf->tf_edx = context.sc_edx;
	tf->tf_ecx = context.sc_ecx;
	tf->tf_eax = context.sc_eax;
	tf->tf_eip = context.sc_eip;
	tf->tf_cs = context.sc_cs;
	tf->tf_esp = context.sc_esp;
	tf->tf_ss = context.sc_ss;

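	/*
	 * Update the signal state under p_lock: the on-stack flag of
	 * this LWP and the process signal mask.
	 */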
	mutex_enter(p->p_lock);
	/* Restore signal stack. */
	if (context.sc_onstack & SS_ONSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	else
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	/* Restore signal mask. */
	native_sigset13_to_sigset(&context.sc_mask, &mask);
	(void) sigprocmask1(l, SIG_SETMASK, &mask, 0);
	mutex_exit(p->p_lock);

	return (EJUSTRETURN);
}
131