/*	$NetBSD: netbsd32_machdep.c,v 1.141 2022/08/20 23:49:31 riastradh Exp $	*/

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: netbsd32_machdep.c,v 1.141 2022/08/20 23:49:31 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_compat_netbsd.h"
#include "opt_compat_netbsd32.h"
#include "opt_execfmt.h"
#include "opt_user_ldt.h"
#include "opt_mtrr.h"
#endif

#include <sys/param.h>
#include <sys/exec.h>
#include <sys/exec_aout.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/systm.h>
#include <sys/core.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/ras.h>
#include <sys/ptrace.h>
#include <sys/kauth.h>
#include <sys/compat_stub.h>

#include <x86/fpu.h>
#include <x86/dbregs.h>
#include <machine/frame.h>
#include <machine/reg.h>
#include <machine/vmparam.h>
#ifdef MTRR
#include <machine/mtrr.h>
#endif
#include <machine/netbsd32_machdep.h>
#include <machine/sysarch.h>
#include <machine/userret.h>
#include <machine/gdt.h>
#include <machine/pmap_private.h>

#include <compat/netbsd32/netbsd32.h>
#include <compat/netbsd32/netbsd32_exec.h>
#include <compat/netbsd32/netbsd32_syscallargs.h>

#include <compat/sys/signal.h>
#include <compat/sys/signalvar.h>

/* Provide the name of the architecture we're emulating */
const char machine32[] = "i386";
const char machine_arch32[] = "i386";

static int netbsd32_process_doxmmregs(struct lwp *, struct lwp *, void *, bool);
static int netbsd32_process_xmmregio(struct lwp *, struct lwp *, struct uio *);

#ifdef USER_LDT
static int x86_64_get_ldt32(struct lwp *, void *, register_t *);
static int x86_64_set_ldt32(struct lwp *, void *, register_t *);
#else
#define x86_64_get_ldt32(x, y, z)	ENOSYS
#define x86_64_set_ldt32(x, y, z)	ENOSYS
#endif

#ifdef MTRR
static int x86_64_get_mtrr32(struct lwp *, void *, register_t *);
static int x86_64_set_mtrr32(struct lwp *, void *, register_t *);
#else
#define x86_64_get_mtrr32(x, y, z)	ENOSYS
#define x86_64_set_mtrr32(x, y, z)	ENOSYS
#endif

int check_sigcontext32(struct lwp *, const struct netbsd32_sigcontext *);
void netbsd32_buildcontext(struct lwp *, struct trapframe *, void *,
    sig_t, int);

#ifdef EXEC_AOUT
/*
 * There is no native a.out -- this function is required
 * for i386 a.out emulation (COMPAT_NETBSD32+EXEC_AOUT).
 */
int
cpu_exec_aout_makecmds(struct lwp *p, struct exec_package *e)
{

	return ENOEXEC;
}
#endif

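/*
 * Set up the register state for a freshly exec'd 32-bit (i386) image:
 * load the 32-bit user segment selectors, clear the general registers
 * (%ebx gets the ps_strings pointer), point %eip/%esp at the new image,
 * and mark the lwp/process as 32-bit (PCB_COMPAT32, PK_32, MDL_COMPAT32).
 */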
void
netbsd32_setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
{
	struct pcb *pcb;
	struct trapframe *tf;
	struct proc *p = l->l_proc;

	pcb = lwp_getpcb(l);

#if defined(USER_LDT)
	pmap_ldt_cleanup(l);
#endif

	netbsd32_adjust_limits(p);

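	/*
	 * Reset the FPU.  Binaries built against 6.99.26 or newer get
	 * the current default x87 control word, older ones the
	 * compatibility value.
	 */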
	fpu_clear(l, pack->ep_osversion >= 699002600
	    ?  __NetBSD_NPXCW__ : __NetBSD_COMPAT_NPXCW__);
	x86_dbregs_clear(l);

	kpreempt_disable();
	pcb->pcb_flags = PCB_COMPAT32;
	p->p_flag |= PK_32;
	l->l_md.md_flags = MDL_COMPAT32;	/* force iret not sysret */
	cpu_segregs32_zero(l);
	cpu_fsgs_reload(l, LSEL(LUDATA32_SEL, SEL_UPL),
	    LSEL(LUDATA32_SEL, SEL_UPL));
	kpreempt_enable();

	tf = l->l_md.md_regs;
	tf->tf_ds = LSEL(LUDATA32_SEL, SEL_UPL);
	tf->tf_es = LSEL(LUDATA32_SEL, SEL_UPL);
	tf->tf_rdi = 0;
	tf->tf_rsi = 0;
	tf->tf_rbp = 0;
	tf->tf_rbx = (uint32_t)p->p_psstrp;
	tf->tf_rdx = 0;
	tf->tf_rcx = 0;
	tf->tf_rax = 0;
	tf->tf_rip = pack->ep_entry;
	tf->tf_cs = LSEL(LUCODE32_SEL, SEL_UPL);
	tf->tf_rflags = PSL_USERSET;
	tf->tf_rsp = stack;
	tf->tf_ss = LSEL(LUDATA32_SEL, SEL_UPL);
}

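/*
 * Redirect the lwp to the 32-bit signal handler: load the 32-bit user
 * segment selectors, reset the FPU state, and point %eip at the handler
 * with the signal frame as the new stack pointer.  A handler address
 * outside the 32-bit user address space is fatal (SIGILL).
 */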
void
netbsd32_buildcontext(struct lwp *l, struct trapframe *tf, void *fp,
    sig_t catcher, int onstack)
{
	/*
	 * Build context to run handler in.
	 */
	tf->tf_ds = GSEL(GUDATA32_SEL, SEL_UPL);
	tf->tf_es = GSEL(GUDATA32_SEL, SEL_UPL);
#if 0
	tf->tf_fs = GSEL(GUDATA32_SEL, SEL_UPL);
	tf->tf_gs = GSEL(GUDATA32_SEL, SEL_UPL);
#endif

	/* Ensure FP state is sane. */
	fpu_sigreset(l);

	tf->tf_rip = (uint64_t)catcher;
	tf->tf_cs = GSEL(GUCODE32_SEL, SEL_UPL);
	tf->tf_rflags &= ~PSL_CLEARSIG;
	tf->tf_rsp = (uint64_t)fp;
	tf->tf_ss = GSEL(GUDATA32_SEL, SEL_UPL);

	/* Remember that we're now on the signal stack. */
	if (onstack)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if ((vaddr_t)catcher >= VM_MAXUSER_ADDRESS32) {
		/*
		 * The process has given an invalid address for the
		 * handler.  Stop it, but only now, so that the right
		 * info still reaches userland (or the core dump).
		 */
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}
}

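/*
 * Deliver a signal with SA_SIGINFO semantics to a 32-bit process:
 * build a struct netbsd32_sigframe_siginfo on the user stack (or the
 * alternate signal stack), copy it out, and redirect the lwp to the
 * handler via netbsd32_buildcontext().
 */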
void
netbsd32_sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct sigacts *ps = p->p_sigacts;
	int onstack, error;
	int sig = ksi->ksi_signo;
	struct netbsd32_sigframe_siginfo *fp, frame;
	const struct sigaction *sa = &SIGACTION(p, sig);
	sig_t catcher = sa->sa_handler;
	struct trapframe *tf = l->l_md.md_regs;
	stack_t * const ss = &l->l_sigstk;

	/* Do we need to jump onto the signal stack? */
	onstack =
	    (ss->ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
	    (sa->sa_flags & SA_ONSTACK) != 0;

	/* Allocate space for the signal handler context. */
	if (onstack)
		fp = (struct netbsd32_sigframe_siginfo *)
		    ((char *)ss->ss_sp + ss->ss_size);
	else
		fp = (struct netbsd32_sigframe_siginfo *)tf->tf_rsp;

	fp--;

	/* Build stack frame for signal trampoline. */
	switch (ps->sa_sigdesc[sig].sd_vers) {
	case __SIGTRAMP_SIGCODE_VERSION:     /* handled by sendsig_sigcontext */
	case __SIGTRAMP_SIGCONTEXT_VERSION: /* handled by sendsig_sigcontext */
	default:	/* unknown version */
		printf("nsendsig: bad version %d\n",
		    ps->sa_sigdesc[sig].sd_vers);
		sigexit(l, SIGILL);
	case __SIGTRAMP_SIGINFO_VERSION:
		break;
	}

	memset(&frame, 0, sizeof(frame));
	frame.sf_ra = (uint32_t)(uintptr_t)ps->sa_sigdesc[sig].sd_tramp;
	frame.sf_signum = sig;
	frame.sf_sip = (uint32_t)(uintptr_t)&fp->sf_si;
	frame.sf_ucp = (uint32_t)(uintptr_t)&fp->sf_uc;
	netbsd32_si_to_si32(&frame.sf_si, (const siginfo_t *)&ksi->ksi_info);
	frame.sf_uc.uc_flags = _UC_SIGMASK;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_link = (uint32_t)(uintptr_t)l->l_ctxlink;
	frame.sf_uc.uc_flags |= (ss->ss_flags & SS_ONSTACK)
	    ? _UC_SETSTACK : _UC_CLRSTACK;
	sendsig_reset(l, sig);

	mutex_exit(p->p_lock);
	cpu_getmcontext32(l, &frame.sf_uc.uc_mcontext, &frame.sf_uc.uc_flags);
	error = copyout(&frame, fp, sizeof(frame));
	mutex_enter(p->p_lock);

	if (error != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

	netbsd32_buildcontext(l, tf, fp, catcher, onstack);
}

/*
 * Dump the machine specific segment at the start of a core dump.
 */
struct md_core32 {
	struct reg32 intreg;
	struct fpreg32 freg;
};

int
cpu_coredump32(struct lwp *l, struct coredump_iostate *iocookie,
    struct core32 *chdr)
{
	struct md_core32 md_core;
	struct coreseg cseg;
	int error;

	if (iocookie == NULL) {
		CORE_SETMAGIC(*chdr, COREMAGIC, MID_I386, 0);
		chdr->c_hdrsize = ALIGN32(sizeof(*chdr));
		chdr->c_seghdrsize = ALIGN32(sizeof(cseg));
		chdr->c_cpusize = sizeof(md_core);
		chdr->c_nseg++;
		return 0;
	}

	/* Save integer registers. */
	error = netbsd32_process_read_regs(l, &md_core.intreg);
	if (error)
		return error;

	/* Save floating point registers. */
	error = netbsd32_process_read_fpregs(l, &md_core.freg, NULL);
	if (error)
		return error;

	CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_I386, CORE_CPU);
	cseg.c_addr = 0;
	cseg.c_size = chdr->c_cpusize;

	MODULE_HOOK_CALL(coredump_write_hook, (iocookie, UIO_SYSSPACE, &cseg,
	    chdr->c_seghdrsize), ENOSYS, error);
	if (error)
		return error;

	MODULE_HOOK_CALL(coredump_write_hook, (iocookie, UIO_SYSSPACE, &md_core,
	    sizeof(md_core)), ENOSYS, error);

	return error;
}

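/*
 * Map a ptrace(2) request from a 32-bit (i386) tracer to the native
 * amd64 request number; requests below PT_FIRSTMACH are shared and pass
 * through unchanged, unknown machine-dependent requests yield -1.
 */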
int
netbsd32_ptrace_translate_request(int req)
{

	switch (req)
	{
	case 0 ... PT_FIRSTMACH - 1:	return req;
	case PT32_STEP:			return PT_STEP;
	case PT32_GETREGS:		return PT_GETREGS;
	case PT32_SETREGS:		return PT_SETREGS;
	case PT32_GETFPREGS:		return PT_GETFPREGS;
	case PT32_SETFPREGS:		return PT_SETFPREGS;
	case PT32_GETXMMREGS:		return PT_GETXMMREGS;
	case PT32_SETXMMREGS:		return PT_SETXMMREGS;
	case PT32_GETDBREGS:		return PT_GETDBREGS;
	case PT32_SETDBREGS:		return PT_SETDBREGS;
	case PT32_SETSTEP:		return PT_SETSTEP;
	case PT32_CLEARSTEP:		return PT_CLEARSTEP;
	case PT32_GETXSTATE:		return PT_GETXSTATE;
	case PT32_SETXSTATE:		return PT_SETXSTATE;
	default:			return -1;
	}
}

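/*
 * Extract the i386-visible register set from the 64-bit trapframe,
 * truncating segment registers to 16 bits and general registers to
 * their low 32 bits.
 */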
int
netbsd32_process_read_regs(struct lwp *l, struct reg32 *regs)
{
	struct trapframe *tf = l->l_md.md_regs;

	/* XXX avoid sign extension problems with unknown upper bits? */
	regs->r_gs = tf->tf_gs & 0xffff;
	regs->r_fs = tf->tf_fs & 0xffff;
	regs->r_es = tf->tf_es & 0xffff;
	regs->r_ds = tf->tf_ds & 0xffff;
	regs->r_eflags = tf->tf_rflags;
	regs->r_edi = tf->tf_rdi & 0xffffffff;
	regs->r_esi = tf->tf_rsi & 0xffffffff;
	regs->r_ebp = tf->tf_rbp & 0xffffffff;
	regs->r_ebx = tf->tf_rbx & 0xffffffff;
	regs->r_edx = tf->tf_rdx & 0xffffffff;
	regs->r_ecx = tf->tf_rcx & 0xffffffff;
	regs->r_eax = tf->tf_rax & 0xffffffff;
	regs->r_eip = tf->tf_rip & 0xffffffff;
	regs->r_cs = tf->tf_cs & 0xffff;
	regs->r_esp = tf->tf_rsp & 0xffffffff;
	regs->r_ss = tf->tf_ss & 0xffff;

	return 0;
}

int
netbsd32_process_read_fpregs(struct lwp *l, struct fpreg32 *regs, size_t *sz)
{

	__CTASSERT(sizeof(*regs) == sizeof(struct save87));
	process_read_fpregs_s87(l, (struct save87 *)regs);
	return 0;
}

int
netbsd32_process_read_dbregs(struct lwp *l, struct dbreg32 *regs, size_t *sz)
{
	struct dbreg regs64;

	x86_dbregs_read(l, &regs64);
	memset(regs, 0, sizeof(*regs));
	regs->dr[0] = regs64.dr[0] & 0xffffffff;
	regs->dr[1] = regs64.dr[1] & 0xffffffff;
	regs->dr[2] = regs64.dr[2] & 0xffffffff;
	regs->dr[3] = regs64.dr[3] & 0xffffffff;

	regs->dr[6] = regs64.dr[6] & 0xffffffff;
	regs->dr[7] = regs64.dr[7] & 0xffffffff;

	return 0;
}

int
netbsd32_process_write_regs(struct lwp *l, const struct reg32 *regs)
{
	struct trapframe *tf;
	struct pcb *pcb;

	tf = l->l_md.md_regs;
	pcb = lwp_getpcb(l);

	/*
	 * Check for security violations.
	 */
	if (((regs->r_eflags ^ tf->tf_rflags) & PSL_USERSTATIC) != 0)
		return EINVAL;
	if (!VALID_USER_CSEL32(regs->r_cs))
		return EINVAL;
	if (regs->r_fs != 0 && !VALID_USER_DSEL32(regs->r_fs) &&
	    !(VALID_USER_FSEL32(regs->r_fs) && pcb->pcb_fs != 0))
		return EINVAL;
	if (regs->r_gs != 0 && !VALID_USER_DSEL32(regs->r_gs) &&
	    !(VALID_USER_GSEL32(regs->r_gs) && pcb->pcb_gs != 0))
		return EINVAL;
	if (regs->r_es != 0 && !VALID_USER_DSEL32(regs->r_es))
		return EINVAL;
	if (!VALID_USER_DSEL32(regs->r_ds) ||
	    !VALID_USER_DSEL32(regs->r_ss))
		return EINVAL;
	if ((u_int)regs->r_eip >= VM_MAXUSER_ADDRESS32)
		return EINVAL;

	tf->tf_rax = regs->r_eax;
	tf->tf_rcx = regs->r_ecx;
	tf->tf_rdx = regs->r_edx;
	tf->tf_rbx = regs->r_ebx;
	tf->tf_rsp = regs->r_esp;
	tf->tf_rbp = regs->r_ebp;
	tf->tf_rsi = regs->r_esi;
	tf->tf_rdi = regs->r_edi;
	tf->tf_rip = regs->r_eip;
	tf->tf_rflags = regs->r_eflags;
	tf->tf_cs = regs->r_cs & 0xFFFF;
	tf->tf_ss = regs->r_ss & 0xFFFF;
	tf->tf_ds = regs->r_ds & 0xFFFF;
	tf->tf_es = regs->r_es & 0xFFFF;
	tf->tf_fs = regs->r_fs & 0xFFFF;
	tf->tf_gs = regs->r_gs & 0xFFFF;

	return 0;
}

int
netbsd32_process_write_fpregs(struct lwp *l, const struct fpreg32 *regs,
    size_t sz)
{

	__CTASSERT(sizeof(*regs) == sizeof(struct save87));
	process_write_fpregs_s87(l, (const struct save87 *)regs);
	return 0;
}

int
netbsd32_process_write_dbregs(struct lwp *l, const struct dbreg32 *regs,
    size_t sz)
{
	size_t i;
	struct dbreg regs64;

	/* Check that DR0-DR3 contain user-space address */
	for (i = 0; i < X86_DBREGS; i++) {
		if ((u_int)regs->dr[i] >= VM_MAXUSER_ADDRESS32)
			return EINVAL;
	}

	if (regs->dr[7] & X86_DR7_GENERAL_DETECT_ENABLE) {
		return EINVAL;
	}

	memset(&regs64, 0, sizeof(regs64));

	regs64.dr[0] = (u_int)regs->dr[0];
	regs64.dr[1] = (u_int)regs->dr[1];
	regs64.dr[2] = (u_int)regs->dr[2];
	regs64.dr[3] = (u_int)regs->dr[3];

	regs64.dr[6] = (u_int)regs->dr[6];
	regs64.dr[7] = (u_int)regs->dr[7];

	x86_dbregs_write(l, &regs64);
	return 0;
}

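/*
 * Back-end for the PT_GETXMMREGS/PT_SETXMMREGS ptrace requests: both
 * tracer and tracee must be 32-bit processes; the FXSAVE area is moved
 * to or from the tracer's address space through a temporary uio.
 */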
static int
netbsd32_process_doxmmregs(struct lwp *curl, struct lwp *l, void *addr,
    bool write)
	/* curl:		 tracer */
	/* l:			 traced */
{
	struct uio uio;
	struct iovec iov;
	struct vmspace *vm;
	int error;

	if ((curl->l_proc->p_flag & PK_32) == 0 ||
	    (l->l_proc->p_flag & PK_32) == 0)
		return EINVAL;

	if (!process_machdep_validfpu(l->l_proc))
		return EINVAL;

	error = proc_vmspace_getref(curl->l_proc, &vm);
	if (error)
		return error;

	iov.iov_base = addr;
	iov.iov_len = sizeof(struct xmmregs32);
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = sizeof(struct xmmregs32);
	uio.uio_rw = write ? UIO_WRITE : UIO_READ;
	uio.uio_vmspace = vm;

	error = netbsd32_process_xmmregio(curl, l, &uio);
	uvmspace_free(vm);
	return error;
}

static int
netbsd32_process_xmmregio(struct lwp *curl, struct lwp *l, struct uio *uio)
	/* curl:		 tracer */
	/* l:			 traced */
{
	struct xmmregs32 regs;
	int error;
	char *kv;
	size_t kl;

	kl = sizeof(regs);
	kv = (char *)&regs;

	if (uio->uio_offset < 0 || uio->uio_offset > (off_t)kl)
		return EINVAL;

	kv += uio->uio_offset;
	kl -= uio->uio_offset;

	if (kl > uio->uio_resid)
		kl = uio->uio_resid;

	process_read_fpregs_xmm(l, &regs.fxstate);
	error = uiomove(kv, kl, uio);
	if (error == 0 && uio->uio_rw == UIO_WRITE) {
		if (l->l_proc->p_stat != SSTOP)
			error = EBUSY;
		else
			process_write_fpregs_xmm(l, &regs.fxstate);
	}

	uio->uio_offset = 0;
	return error;
}

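/*
 * sysarch(2) front-end for 32-bit processes.  The i386 op codes are
 * dispatched to the native x86_iopl() or to the 32-bit LDT/MTRR
 * wrappers below; anything else is rejected with EINVAL.
 */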
int
netbsd32_sysarch(struct lwp *l, const struct netbsd32_sysarch_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) op;
		syscallarg(netbsd32_voidp) parms;
	} */
	int error;

	switch (SCARG(uap, op)) {
	case X86_IOPL:
		error = x86_iopl(l,
		    NETBSD32PTR64(SCARG(uap, parms)), retval);
		break;
	case X86_GET_LDT:
		error = x86_64_get_ldt32(l,
		    NETBSD32PTR64(SCARG(uap, parms)), retval);
		break;
	case X86_SET_LDT:
		error = x86_64_set_ldt32(l,
		    NETBSD32PTR64(SCARG(uap, parms)), retval);
		break;
	case X86_GET_MTRR:
		error = x86_64_get_mtrr32(l,
		    NETBSD32PTR64(SCARG(uap, parms)), retval);
		break;
	case X86_SET_MTRR:
		error = x86_64_set_mtrr32(l,
		    NETBSD32PTR64(SCARG(uap, parms)), retval);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}

#ifdef USER_LDT
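/*
 * 32-bit wrappers for X86_SET_LDT/X86_GET_LDT: convert the i386
 * argument structure, bounce the descriptors through a kernel buffer
 * and hand off to the native x86_set_ldt1()/x86_get_ldt1() helpers.
 */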
static int
x86_64_set_ldt32(struct lwp *l, void *args, register_t *retval)
{
	struct x86_set_ldt_args32 ua32;
	struct x86_set_ldt_args ua;
	union descriptor *descv;
	int error;

	if ((error = copyin(args, &ua32, sizeof(ua32))) != 0)
		return error;

	ua.start = ua32.start;
	ua.num = ua32.num;

	if (ua.num < 0 || ua.num > MAX_USERLDT_SLOTS)
		return EINVAL;

	const size_t alloc_size = sizeof(*descv) * ua.num;

	descv = kmem_alloc(alloc_size, KM_SLEEP);
	error = copyin((void *)(uintptr_t)ua32.desc, descv,
	    sizeof(*descv) * ua.num);
	if (error == 0)
		error = x86_set_ldt1(l, &ua, descv);
	*retval = ua.start;

	kmem_free(descv, alloc_size);
	return error;
}

static int
x86_64_get_ldt32(struct lwp *l, void *args, register_t *retval)
{
	struct x86_get_ldt_args32 ua32;
	struct x86_get_ldt_args ua;
	union descriptor *cp;
	int error;

	if ((error = copyin(args, &ua32, sizeof(ua32))) != 0)
		return error;

	ua.start = ua32.start;
	ua.num = ua32.num;

	if (ua.num < 0 || ua.num > MAX_USERLDT_SLOTS)
		return EINVAL;

	const size_t alloc_size = ua.num * sizeof(union descriptor);

	cp = kmem_alloc(alloc_size, KM_SLEEP);
	error = x86_get_ldt1(l, &ua, cp);
	*retval = ua.num;
	if (error == 0)
		error = copyout(cp, (void *)(uintptr_t)ua32.desc,
		    ua.num * sizeof(*cp));

	kmem_free(cp, alloc_size);
	return error;
}
#endif

#ifdef MTRR
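/*
 * 32-bit wrappers for X86_GET_MTRR/X86_SET_MTRR: copy the struct mtrr32
 * array to/from userland one entry at a time, converting to the native
 * struct mtrr, and report back the number of entries processed.
 */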
static int
x86_64_get_mtrr32(struct lwp *l, void *args, register_t *retval)
{
	struct x86_64_get_mtrr_args32 args32;
	int error, i;
	int32_t n;
	struct mtrr32 *m32p, m32;
	struct mtrr *m64p, *mp;
	size_t size;

	m64p = NULL;

	if (mtrr_funcs == NULL)
		return ENOSYS;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_GET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return error;

	error = copyin(args, &args32, sizeof(args32));
	if (error != 0)
		return error;

	if (args32.mtrrp == 0) {
		n = (MTRR_I686_NFIXED_SOFT + MTRR_I686_NVAR_MAX);
		return copyout(&n, (void *)(uintptr_t)args32.n, sizeof(n));
	}

	error = copyin((void *)(uintptr_t)args32.n, &n, sizeof(n));
	if (error != 0)
		return error;

	if (n <= 0 || n > (MTRR_I686_NFIXED_SOFT + MTRR_I686_NVAR_MAX))
		return EINVAL;

	size = n * sizeof(struct mtrr);
	m64p = kmem_zalloc(size, KM_SLEEP);
	error = mtrr_get(m64p, &n, l->l_proc, 0);
	if (error != 0)
		goto fail;
	m32p = (struct mtrr32 *)(uintptr_t)args32.mtrrp;
	mp = m64p;
	for (i = 0; i < n; i++) {
		m32.base = mp->base;
		m32.len = mp->len;
		m32.type = mp->type;
		m32.flags = mp->flags;
		m32.owner = mp->owner;
		error = copyout(&m32, m32p, sizeof(m32));
		if (error != 0)
			break;
		mp++;
		m32p++;
	}
fail:
	if (m64p != NULL)
		kmem_free(m64p, size);
	if (error != 0)
		n = 0;
	copyout(&n, (void *)(uintptr_t)args32.n, sizeof(n));
	return error;
}

static int
x86_64_set_mtrr32(struct lwp *l, void *args, register_t *retval)
{
	struct x86_64_set_mtrr_args32 args32;
	struct mtrr32 *m32p, m32;
	struct mtrr *m64p, *mp;
	int error, i;
	int32_t n;
	size_t size;

	m64p = NULL;

	if (mtrr_funcs == NULL)
		return ENOSYS;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_SET,
	    NULL, NULL, NULL, NULL);
	if (error)
		return error;

	error = copyin(args, &args32, sizeof(args32));
	if (error != 0)
		return error;

	error = copyin((void *)(uintptr_t)args32.n, &n, sizeof(n));
	if (error != 0)
		return error;

	if (n <= 0 || n > (MTRR_I686_NFIXED_SOFT + MTRR_I686_NVAR_MAX)) {
		error = EINVAL;
		goto fail;
	}

	size = n * sizeof(struct mtrr);
	m64p = kmem_zalloc(size, KM_SLEEP);
	m32p = (struct mtrr32 *)(uintptr_t)args32.mtrrp;
	mp = m64p;
	for (i = 0; i < n; i++) {
		error = copyin(m32p, &m32, sizeof(m32));
		if (error != 0)
			goto fail;
		mp->base = m32.base;
		mp->len = m32.len;
		mp->type = m32.type;
		mp->flags = m32.flags;
		mp->owner = m32.owner;
		m32p++;
		mp++;
	}

	error = mtrr_set(m64p, &n, l->l_proc, 0);
fail:
	if (m64p != NULL)
		kmem_free(m64p, size);
	if (error != 0)
		n = 0;
	copyout(&n, (void *)(uintptr_t)args32.n, sizeof(n));
	return error;
}
#endif

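/*
 * Install a 32-bit machine context (setcontext(2), lwp creation, signal
 * return): validate and load the general registers, optionally the TLS
 * base and the FXSAVE floating point state, and track the alternate
 * signal stack state.
 */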
int
cpu_setmcontext32(struct lwp *l, const mcontext32_t *mcp, unsigned int flags)
{
	struct trapframe *tf = l->l_md.md_regs;
	const __greg32_t *gr = mcp->__gregs;
	struct proc *p = l->l_proc;
	int error;

	/* Restore register context, if any. */
	if ((flags & _UC_CPU) != 0) {
		/*
		 * Check for security violations.
		 */
		error = cpu_mcontext32_validate(l, mcp);
		if (error != 0)
			return error;

		cpu_fsgs_reload(l, gr[_REG32_FS], gr[_REG32_GS]);
		tf->tf_es = gr[_REG32_ES] & 0xFFFF;
		tf->tf_ds = gr[_REG32_DS] & 0xFFFF;
		/* Only change the user-alterable part of eflags */
		tf->tf_rflags &= ~PSL_USER;
		tf->tf_rflags |= (gr[_REG32_EFL] & PSL_USER);
		tf->tf_rdi    = gr[_REG32_EDI];
		tf->tf_rsi    = gr[_REG32_ESI];
		tf->tf_rbp    = gr[_REG32_EBP];
		tf->tf_rbx    = gr[_REG32_EBX];
		tf->tf_rdx    = gr[_REG32_EDX];
		tf->tf_rcx    = gr[_REG32_ECX];
		tf->tf_rax    = gr[_REG32_EAX];
		tf->tf_rip    = gr[_REG32_EIP];
		tf->tf_cs     = gr[_REG32_CS] & 0xFFFF;
		tf->tf_rsp    = gr[_REG32_UESP];
		tf->tf_ss     = gr[_REG32_SS] & 0xFFFF;
	}

	if ((flags & _UC_TLSBASE) != 0)
		lwp_setprivate(l, (void *)(uintptr_t)mcp->_mc_tlsbase);

	/* Restore floating point register context, if any. */
	if ((flags & _UC_FPU) != 0) {
		/* Assume fxsave context */
		process_write_fpregs_xmm(l, (const struct fxsave *)
		    &mcp->__fpregs.__fp_reg_set.__fp_xmm_state);
	}

	mutex_enter(p->p_lock);
	if (flags & _UC_SETSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if (flags & _UC_CLRSTACK)
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	mutex_exit(p->p_lock);

	return 0;
}

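/*
 * Capture the current 32-bit machine context: general registers (with
 * %eip rewound to the start of any active RAS), TLS base and FXSAVE
 * floating point state.
 */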
void
cpu_getmcontext32(struct lwp *l, mcontext32_t *mcp, unsigned int *flags)
{
	const struct trapframe *tf = l->l_md.md_regs;
	__greg32_t *gr = mcp->__gregs;
	__greg32_t ras_eip;

	/* Save register context. */
	gr[_REG32_GS]  = tf->tf_gs & 0xFFFF;
	gr[_REG32_FS]  = tf->tf_fs & 0xFFFF;
	gr[_REG32_ES]  = tf->tf_es & 0xFFFF;
	gr[_REG32_DS]  = tf->tf_ds & 0xFFFF;
	gr[_REG32_EFL] = tf->tf_rflags;
	gr[_REG32_EDI]    = tf->tf_rdi;
	gr[_REG32_ESI]    = tf->tf_rsi;
	gr[_REG32_EBP]    = tf->tf_rbp;
	gr[_REG32_EBX]    = tf->tf_rbx;
	gr[_REG32_EDX]    = tf->tf_rdx;
	gr[_REG32_ECX]    = tf->tf_rcx;
	gr[_REG32_EAX]    = tf->tf_rax;
	gr[_REG32_EIP]    = tf->tf_rip;
	gr[_REG32_CS]     = tf->tf_cs & 0xFFFF;
	gr[_REG32_ESP]    = tf->tf_rsp;
	gr[_REG32_UESP]   = tf->tf_rsp;
	gr[_REG32_SS]     = tf->tf_ss & 0xFFFF;
	gr[_REG32_TRAPNO] = tf->tf_trapno;
	gr[_REG32_ERR]    = tf->tf_err;

	if ((ras_eip = (__greg32_t)(uintptr_t)ras_lookup(l->l_proc,
	    (void *) (uintptr_t)gr[_REG32_EIP])) != (__greg32_t)-1)
		gr[_REG32_EIP] = ras_eip;

	*flags |= _UC_CPU;

	mcp->_mc_tlsbase = (uint32_t)(uintptr_t)l->l_private;
	*flags |= _UC_TLSBASE;

	/* Save floating point register context. */
	process_read_fpregs_xmm(l, (struct fxsave *)
	    &mcp->__fpregs.__fp_reg_set.__fp_xmm_state);
	memset(&mcp->__fpregs.__fp_pad, 0, sizeof(mcp->__fpregs.__fp_pad));
	*flags |= _UC_FXSAVE | _UC_FPU;
}

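/*
 * Entry point for a freshly created lwp of a 32-bit process: install
 * the 32-bit ucontext supplied by the creator and return to userland.
 */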
void
startlwp32(void *arg)
{
	ucontext32_t *uc = arg;
	lwp_t *l = curlwp;
	int error __diagused;

	error = cpu_setmcontext32(l, &uc->uc_mcontext, uc->uc_flags);
	KASSERT(error == 0);

	/* Note: we are freeing ucontext_t, not ucontext32_t. */
	kmem_free(uc, sizeof(ucontext_t));
	userret(l);
}

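/*
 * Sanity-check a 32-bit sigcontext (used by the compat sigreturn
 * paths): reject attempts to change privileged eflags bits, non-user
 * segment selectors and an instruction pointer outside the 32-bit user
 * address space.
 */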
int
check_sigcontext32(struct lwp *l, const struct netbsd32_sigcontext *scp)
{
	struct pmap *pmap = l->l_proc->p_vmspace->vm_map.pmap;
	struct trapframe *tf;
	struct pcb *pcb;

	tf = l->l_md.md_regs;
	pcb = lwp_getpcb(curlwp);

	if (((scp->sc_eflags ^ tf->tf_rflags) & PSL_USERSTATIC) != 0)
		return EINVAL;

	if (__predict_false(pmap->pm_ldt != NULL)) {
		/* Allow unfamiliar segment register values (USER_LDT). */
		if (!USERMODE(scp->sc_cs))
			return EINVAL;
	} else {
		if (!VALID_USER_CSEL32(scp->sc_cs))
			return EINVAL;
		if (scp->sc_fs != 0 && !VALID_USER_DSEL32(scp->sc_fs) &&
		    !(VALID_USER_FSEL32(scp->sc_fs) && pcb->pcb_fs != 0))
			return EINVAL;
		if (scp->sc_gs != 0 && !VALID_USER_DSEL32(scp->sc_gs) &&
		    !(VALID_USER_GSEL32(scp->sc_gs) && pcb->pcb_gs != 0))
			return EINVAL;
		if (scp->sc_es != 0 && !VALID_USER_DSEL32(scp->sc_es))
			return EINVAL;
		if (!VALID_USER_DSEL32(scp->sc_ds) ||
		    !VALID_USER_DSEL32(scp->sc_ss))
			return EINVAL;
	}

	if (scp->sc_eip >= VM_MAXUSER_ADDRESS32)
		return EINVAL;

	return 0;
}

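/*
 * Same checks as check_sigcontext32(), but for a 32-bit mcontext as
 * used by cpu_setmcontext32() and cpu_mcontext32from64_validate().
 */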
int
cpu_mcontext32_validate(struct lwp *l, const mcontext32_t *mcp)
{
	struct pmap *pmap = l->l_proc->p_vmspace->vm_map.pmap;
	const __greg32_t *gr;
	struct trapframe *tf;
	struct pcb *pcb;

	gr = mcp->__gregs;
	tf = l->l_md.md_regs;
	pcb = lwp_getpcb(l);

	if (((gr[_REG32_EFL] ^ tf->tf_rflags) & PSL_USERSTATIC) != 0)
		return EINVAL;

	if (__predict_false(pmap->pm_ldt != NULL)) {
		/* Allow unfamiliar segment register values (USER_LDT). */
		if (!USERMODE(gr[_REG32_CS]))
			return EINVAL;
	} else {
		if (!VALID_USER_CSEL32(gr[_REG32_CS]))
			return EINVAL;
		if (gr[_REG32_FS] != 0 && !VALID_USER_DSEL32(gr[_REG32_FS]) &&
		    !(VALID_USER_FSEL32(gr[_REG32_FS]) && pcb->pcb_fs != 0))
			return EINVAL;
		if (gr[_REG32_GS] != 0 && !VALID_USER_DSEL32(gr[_REG32_GS]) &&
		    !(VALID_USER_GSEL32(gr[_REG32_GS]) && pcb->pcb_gs != 0))
			return EINVAL;
		if (gr[_REG32_ES] != 0 && !VALID_USER_DSEL32(gr[_REG32_ES]))
			return EINVAL;
		if (!VALID_USER_DSEL32(gr[_REG32_DS]) ||
		    !VALID_USER_DSEL32(gr[_REG32_SS]))
			return EINVAL;
	}

	if (gr[_REG32_EIP] >= VM_MAXUSER_ADDRESS32)
		return EINVAL;

	return 0;
}

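/*
 * Validate a native (64-bit) struct reg as if it described a 32-bit
 * context; registered below as the netbsd32_reg_validate hook, which
 * the register-write paths use for 32-bit processes.
 */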
static int
cpu_mcontext32from64_validate(struct lwp *l, const struct reg *regp)
{
	mcontext32_t mc;
	__greg32_t *gr32 = mc.__gregs;
	const __greg_t *gr = regp->regs;

	memset(&mc, 0, sizeof(mc));
	gr32[_REG32_EFL] = gr[_REG_RFLAGS];
	gr32[_REG32_EIP] = gr[_REG_RIP];
	gr32[_REG32_CS] = gr[_REG_CS];
	gr32[_REG32_DS] = gr[_REG_DS];
	gr32[_REG32_ES] = gr[_REG_ES];
	gr32[_REG32_FS] = gr[_REG_FS];
	gr32[_REG32_GS] = gr[_REG_GS];
	gr32[_REG32_SS] = gr[_REG_SS];
	return cpu_mcontext32_validate(l, &mc);
}

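/*
 * Pick the default map address for a 32-bit process, keeping mappings
 * within the 32-bit user address space (top-down or bottom-up layout).
 */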
vaddr_t
netbsd32_vm_default_addr(struct proc *p, vaddr_t base, vsize_t sz,
    int topdown)
{
	if (topdown)
		return VM_DEFAULT_ADDRESS32_TOPDOWN(base, sz);
	else
		return VM_DEFAULT_ADDRESS32_BOTTOMUP(base, sz);
}

static const char *
netbsd32_machine32(void)
{

	return machine32;
}

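/*
 * Register/unregister the machine-dependent hooks used by the
 * compat_netbsd32 module: the emulated machine name, 64->32 register
 * validation and the XMM register ptrace back-end.
 */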
void
netbsd32_machdep_md_init(void)
{

	MODULE_HOOK_SET(netbsd32_machine32_hook, netbsd32_machine32);
	MODULE_HOOK_SET(netbsd32_reg_validate_hook,
	    cpu_mcontext32from64_validate);
	MODULE_HOOK_SET(netbsd32_process_doxmmregs_hook,
	    netbsd32_process_doxmmregs);
}

void
netbsd32_machdep_md_fini(void)
{

	MODULE_HOOK_UNSET(netbsd32_machine32_hook);
	MODULE_HOOK_UNSET(netbsd32_reg_validate_hook);
	MODULE_HOOK_UNSET(netbsd32_process_doxmmregs_hook);
}
1043