xref: /freebsd/sys/i386/linux/linux_machdep.c (revision 3460fab5)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2000 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_posix.h"

#include <sys/param.h>
#include <sys/imgact_aout.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/vnode.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <machine/frame.h>
#include <machine/pcb.h>			/* needed for pcb definition in linux_set_thread_area */
#include <machine/psl.h>
#include <machine/segments.h>
#include <machine/sysarch.h>

#include <vm/pmap.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

#include <x86/reg.h>

#include <i386/linux/linux.h>
#include <i386/linux/linux_proto.h>
#include <compat/linux/linux_emul.h>
#include <compat/linux/linux_fork.h>
#include <compat/linux/linux_ipc.h>
#include <compat/linux/linux_misc.h>
#include <compat/linux/linux_mmap.h>
#include <compat/linux/linux_signal.h>
#include <compat/linux/linux_util.h>

struct l_descriptor {
	l_uint		entry_number;
	l_ulong		base_addr;
	l_uint		limit;
	l_uint		seg_32bit:1;
	l_uint		contents:2;
	l_uint		read_exec_only:1;
	l_uint		limit_in_pages:1;
	l_uint		seg_not_present:1;
	l_uint		useable:1;
};

struct l_old_select_argv {
	l_int		nfds;
	l_fd_set	*readfds;
	l_fd_set	*writefds;
	l_fd_set	*exceptfds;
	struct l_timeval	*timeout;
};

struct l_ipc_kludge {
	struct l_msgbuf *msgp;
	l_long msgtyp;
};

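/*
 * Demultiplexer for the single ipc(2) entry point used by Linux/i386:
 * the low 16 bits of "what" select the SysV IPC operation, while the
 * upper 16 bits carry a calling-convention version (used by MSGRCV to
 * decide whether msgp/msgtyp arrive via a struct l_ipc_kludge).
 */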
int
linux_ipc(struct thread *td, struct linux_ipc_args *args)
{

	switch (args->what & 0xFFFF) {
	case LINUX_SEMOP: {

		return (kern_semop(td, args->arg1, PTRIN(args->ptr),
		    args->arg2, NULL));
	}
	case LINUX_SEMGET: {
		struct linux_semget_args a;

		a.key = args->arg1;
		a.nsems = args->arg2;
		a.semflg = args->arg3;
		return (linux_semget(td, &a));
	}
	case LINUX_SEMCTL: {
		struct linux_semctl_args a;
		int error;

		a.semid = args->arg1;
		a.semnum = args->arg2;
		a.cmd = args->arg3;
		error = copyin(PTRIN(args->ptr), &a.arg, sizeof(a.arg));
		if (error)
			return (error);
		return (linux_semctl(td, &a));
	}
	case LINUX_SEMTIMEDOP: {
		struct linux_semtimedop_args a;

		a.semid = args->arg1;
		a.tsops = PTRIN(args->ptr);
		a.nsops = args->arg2;
		a.timeout = PTRIN(args->arg5);
		return (linux_semtimedop(td, &a));
	}
	case LINUX_MSGSND: {
		struct linux_msgsnd_args a;

		a.msqid = args->arg1;
		a.msgp = PTRIN(args->ptr);
		a.msgsz = args->arg2;
		a.msgflg = args->arg3;
		return (linux_msgsnd(td, &a));
	}
	case LINUX_MSGRCV: {
		struct linux_msgrcv_args a;

		a.msqid = args->arg1;
		a.msgsz = args->arg2;
		a.msgflg = args->arg3;
		if ((args->what >> 16) == 0) {
			struct l_ipc_kludge tmp;
			int error;

			if (args->ptr == 0)
				return (EINVAL);
			error = copyin(PTRIN(args->ptr), &tmp, sizeof(tmp));
			if (error)
				return (error);
			a.msgp = PTRIN(tmp.msgp);
			a.msgtyp = tmp.msgtyp;
		} else {
			a.msgp = PTRIN(args->ptr);
			a.msgtyp = args->arg5;
		}
		return (linux_msgrcv(td, &a));
	}
	case LINUX_MSGGET: {
		struct linux_msgget_args a;

		a.key = args->arg1;
		a.msgflg = args->arg2;
		return (linux_msgget(td, &a));
	}
	case LINUX_MSGCTL: {
		struct linux_msgctl_args a;

		a.msqid = args->arg1;
		a.cmd = args->arg2;
		a.buf = PTRIN(args->ptr);
		return (linux_msgctl(td, &a));
	}
	case LINUX_SHMAT: {
		struct linux_shmat_args a;
		l_uintptr_t addr;
		int error;

		a.shmid = args->arg1;
		a.shmaddr = PTRIN(args->ptr);
		a.shmflg = args->arg2;
		error = linux_shmat(td, &a);
		if (error != 0)
			return (error);
		addr = td->td_retval[0];
		error = copyout(&addr, PTRIN(args->arg3), sizeof(addr));
		td->td_retval[0] = 0;
		return (error);
	}
	case LINUX_SHMDT: {
		struct linux_shmdt_args a;

		a.shmaddr = PTRIN(args->ptr);
		return (linux_shmdt(td, &a));
	}
	case LINUX_SHMGET: {
		struct linux_shmget_args a;

		a.key = args->arg1;
		a.size = args->arg2;
		a.shmflg = args->arg3;
		return (linux_shmget(td, &a));
	}
	case LINUX_SHMCTL: {
		struct linux_shmctl_args a;

		a.shmid = args->arg1;
		a.cmd = args->arg2;
		a.buf = PTRIN(args->ptr);
		return (linux_shmctl(td, &a));
	}
	default:
		break;
	}

	return (EINVAL);
}

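/*
 * The original select(2) entry point takes a single pointer to a block
 * holding all five arguments; unpack it and forward to linux_select().
 */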
int
linux_old_select(struct thread *td, struct linux_old_select_args *args)
{
	struct l_old_select_argv linux_args;
	struct linux_select_args newsel;
	int error;

	error = copyin(args->ptr, &linux_args, sizeof(linux_args));
	if (error)
		return (error);

	newsel.nfds = linux_args.nfds;
	newsel.readfds = linux_args.readfds;
	newsel.writefds = linux_args.writefds;
	newsel.exceptfds = linux_args.exceptfds;
	newsel.timeout = linux_args.timeout;
	return (linux_select(td, &newsel));
}

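/*
 * Load the per-thread %gs (TLS) segment of a cloned thread from the
 * Linux user descriptor supplied by the caller.
 */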
int
linux_set_cloned_tls(struct thread *td, void *desc)
{
	struct segment_descriptor sd;
	struct l_user_desc info;
	int idx, error;
	int a[2];

	error = copyin(desc, &info, sizeof(struct l_user_desc));
	if (error) {
		linux_msg(td, "set_cloned_tls copyin failed!");
	} else {
		idx = info.entry_number;

		/*
		 * Looks like we're getting the idx we returned
		 * in the set_thread_area() syscall.
		 */
		if (idx != 6 && idx != 3) {
			linux_msg(td, "set_cloned_tls resetting idx!");
			idx = 3;
		}

		/* This doesn't happen in practice. */
		if (idx == 6) {
			/* We might copy out the entry_number as 3. */
			info.entry_number = 3;
			error = copyout(&info, desc, sizeof(struct l_user_desc));
			if (error)
				linux_msg(td, "set_cloned_tls copyout failed!");
		}

		a[0] = LINUX_LDT_entry_a(&info);
		a[1] = LINUX_LDT_entry_b(&info);

		memcpy(&sd, &a, sizeof(a));
		/* set %gs */
		td->td_pcb->pcb_gsd = sd;
		td->td_pcb->pcb_gs = GSEL(GUGS_SEL, SEL_UPL);
	}

	return (error);
}

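/*
 * Prepare the trap frame of a newly created thread: switch it to the
 * supplied stack, if any, and make the child see a zero return value.
 */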
int
linux_set_upcall(struct thread *td, register_t stack)
{

	if (stack)
		td->td_frame->tf_esp = stack;

	/*
	 * The newly created Linux thread returns
	 * to user space by the same path its parent does.
	 */
	td->td_frame->tf_eax = 0;
	return (0);
}

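/*
 * mmap2(2) is mmap(2) with the file offset expressed in 4096-byte pages,
 * which lets a 32-bit process map file offsets beyond 4GB.
 */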
int
linux_mmap2(struct thread *td, struct linux_mmap2_args *args)
{

	return (linux_mmap_common(td, args->addr, args->len, args->prot,
		args->flags, args->fd, (uint64_t)(uint32_t)args->pgoff *
		PAGE_SIZE));
}

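/*
 * The old mmap(2) passes its arguments through a user-space structure
 * and takes the offset in bytes.
 */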
int
linux_mmap(struct thread *td, struct linux_mmap_args *args)
{
	int error;
	struct l_mmap_argv linux_args;

	error = copyin(args->ptr, &linux_args, sizeof(linux_args));
	if (error)
		return (error);

	return (linux_mmap_common(td, linux_args.addr, linux_args.len,
	    linux_args.prot, linux_args.flags, linux_args.fd,
	    (uint32_t)linux_args.pgoff));
}

int
linux_mprotect(struct thread *td, struct linux_mprotect_args *uap)
{

	return (linux_mprotect_common(td, PTROUT(uap->addr), uap->len, uap->prot));
}

int
linux_madvise(struct thread *td, struct linux_madvise_args *uap)
{

	return (linux_madvise_common(td, PTROUT(uap->addr), uap->len, uap->behav));
}

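/*
 * ioperm(2) and iopl(2) map onto the native i386 I/O permission
 * bitmap and PSL_IOPL handling, respectively.
 */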
int
linux_ioperm(struct thread *td, struct linux_ioperm_args *args)
{
	int error;
	struct i386_ioperm_args iia;

	iia.start = args->start;
	iia.length = args->length;
	iia.enable = args->enable;
	error = i386_set_ioperm(td, &iia);
	return (error);
}

int
linux_iopl(struct thread *td, struct linux_iopl_args *args)
{
	int error;

	if (args->level < 0 || args->level > 3)
		return (EINVAL);
	if ((error = priv_check(td, PRIV_IO)) != 0)
		return (error);
	if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
		return (error);
	td->td_frame->tf_eflags = (td->td_frame->tf_eflags & ~PSL_IOPL) |
	    (args->level * (PSL_IOPL / 3));
	return (0);
}

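/*
 * modify_ldt(2): func 0 reads the LDT, 2 reads the (zero-filled) default
 * LDT, and 1/0x11 install a single descriptor built from a Linux
 * user_desc-style record.
 */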
int
linux_modify_ldt(struct thread *td, struct linux_modify_ldt_args *uap)
{
	int error;
	struct i386_ldt_args ldt;
	struct l_descriptor ld;
	union descriptor desc;
	int size, written;

	switch (uap->func) {
	case 0x00: /* read_ldt */
		ldt.start = 0;
		ldt.descs = uap->ptr;
		ldt.num = uap->bytecount / sizeof(union descriptor);
		error = i386_get_ldt(td, &ldt);
		td->td_retval[0] *= sizeof(union descriptor);
		break;
	case 0x02: /* read_default_ldt = 0 */
		size = 5*sizeof(struct l_desc_struct);
		if (size > uap->bytecount)
			size = uap->bytecount;
		for (written = error = 0; written < size && error == 0; written++)
			error = subyte((char *)uap->ptr + written, 0);
		td->td_retval[0] = written;
		break;
	case 0x01: /* write_ldt */
	case 0x11: /* write_ldt */
		if (uap->bytecount != sizeof(ld))
			return (EINVAL);

		error = copyin(uap->ptr, &ld, sizeof(ld));
		if (error)
			return (error);

		ldt.start = ld.entry_number;
		ldt.descs = &desc;
		ldt.num = 1;
		desc.sd.sd_lolimit = (ld.limit & 0x0000ffff);
		desc.sd.sd_hilimit = (ld.limit & 0x000f0000) >> 16;
		desc.sd.sd_lobase = (ld.base_addr & 0x00ffffff);
		desc.sd.sd_hibase = (ld.base_addr & 0xff000000) >> 24;
		desc.sd.sd_type = SDT_MEMRO | ((ld.read_exec_only ^ 1) << 1) |
			(ld.contents << 2);
		desc.sd.sd_dpl = 3;
		desc.sd.sd_p = (ld.seg_not_present ^ 1);
		desc.sd.sd_xx = 0;
		desc.sd.sd_def32 = ld.seg_32bit;
		desc.sd.sd_gran = ld.limit_in_pages;
		error = i386_set_ldt(td, &ldt, &desc);
		break;
	default:
		error = ENOSYS;
		break;
	}

	if (error == EOPNOTSUPP) {
		linux_msg(td, "modify_ldt needs kernel option USER_LDT");
		error = ENOSYS;
	}

	return (error);
}

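/*
 * The old sigaction(2), which uses the single-word signal mask of
 * l_osigaction_t; convert to and from the wide l_sigaction_t that
 * linux_do_sigaction() expects.
 */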
int
linux_sigaction(struct thread *td, struct linux_sigaction_args *args)
{
	l_osigaction_t osa;
	l_sigaction_t act, oact;
	int error;

	if (args->nsa != NULL) {
		error = copyin(args->nsa, &osa, sizeof(l_osigaction_t));
		if (error)
			return (error);
		act.lsa_handler = osa.lsa_handler;
		act.lsa_flags = osa.lsa_flags;
		act.lsa_restorer = osa.lsa_restorer;
		LINUX_SIGEMPTYSET(act.lsa_mask);
		act.lsa_mask.__mask = osa.lsa_mask;
	}

	error = linux_do_sigaction(td, args->sig, args->nsa ? &act : NULL,
	    args->osa ? &oact : NULL);

	if (args->osa != NULL && !error) {
		osa.lsa_handler = oact.lsa_handler;
		osa.lsa_flags = oact.lsa_flags;
		osa.lsa_restorer = oact.lsa_restorer;
		osa.lsa_mask = oact.lsa_mask.__mask;
		error = copyout(&osa, args->osa, sizeof(l_osigaction_t));
	}

	return (error);
}

/*
 * Linux has two extra args, restart and oldmask.  We don't use these,
 * but it seems that "restart" is actually a context pointer that
 * enables the signal to happen with a different register set.
 */
int
linux_sigsuspend(struct thread *td, struct linux_sigsuspend_args *args)
{
	sigset_t sigmask;
	l_sigset_t mask;

	LINUX_SIGEMPTYSET(mask);
	mask.__mask = args->mask;
	linux_to_bsd_sigset(&mask, &sigmask);
	return (kern_sigsuspend(td, sigmask));
}

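/*
 * pause(2) is sigsuspend(2) with the thread's current signal mask.
 */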
int
linux_pause(struct thread *td, struct linux_pause_args *args)
{
	struct proc *p = td->td_proc;
	sigset_t sigmask;

	PROC_LOCK(p);
	sigmask = td->td_sigmask;
	PROC_UNLOCK(p);
	return (kern_sigsuspend(td, sigmask));
}

int
linux_set_thread_area(struct thread *td, struct linux_set_thread_area_args *args)
{
	struct l_user_desc info;
	int error;
	int idx;
	int a[2];
	struct segment_descriptor sd;

	error = copyin(args->desc, &info, sizeof(struct l_user_desc));
	if (error)
		return (error);

	idx = info.entry_number;
	/*
	 * Semantics of the Linux version: every thread in the system has an
	 * array of 3 TLS descriptors.  The 1st is used by glibc for TLS, the
	 * 2nd by Wine, and the 3rd is unknown.  This syscall loads one of the
	 * selected TLS descriptors with a value and also loads GDT descriptors
	 * 6, 7 and 8 with the content of the per-thread descriptors.
	 *
	 * Semantics of the FreeBSD version: I think we can ignore that Linux
	 * has 3 per-thread descriptors and use just the 1st one.  The
	 * tls_array[] is used only in the set/get_thread_area() syscalls and
	 * for loading the GDT descriptors.  In FreeBSD we use just one GDT
	 * descriptor for TLS, so we will load just one.
	 *
	 * XXX: this doesn't work when a user space process tries to use more
	 * than one TLS segment.  A comment in the Linux sources says Wine
	 * might do this.
	 */

	/*
	 * We support just glibc TLS for now.  We also let entry 3 proceed,
	 * because that is the segment we actually use, so a second call made
	 * with the entry number we returned still succeeds.
	 */
	if (idx != 6 && idx != -1 && idx != 3)
		return (EINVAL);

	/*
	 * We have to copy out the GDT entry we use.
	 * FreeBSD uses GDT entry #3 for storing %gs, so load that.
	 *
	 * XXX: what if a user space program doesn't check this value and tries
	 * to use 6, 7 or 8?
	 */
	idx = info.entry_number = 3;
	error = copyout(&info, args->desc, sizeof(struct l_user_desc));
	if (error)
		return (error);

	if (LINUX_LDT_empty(&info)) {
		a[0] = 0;
		a[1] = 0;
	} else {
		a[0] = LINUX_LDT_entry_a(&info);
		a[1] = LINUX_LDT_entry_b(&info);
	}

	memcpy(&sd, &a, sizeof(a));
	/* This is taken from the i386 version of cpu_set_user_tls(). */
	critical_enter();
	/* set %gs */
	td->td_pcb->pcb_gsd = sd;
	PCPU_GET(fsgs_gdt)[1] = sd;
	load_gs(GSEL(GUGS_SEL, SEL_UPL));
	critical_exit();

	return (0);
}

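/*
 * Read back the %gs descriptor installed by linux_set_thread_area() and
 * convert it into a Linux user_desc.
 */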
int
linux_get_thread_area(struct thread *td, struct linux_get_thread_area_args *args)
{

	struct l_user_desc info;
	int error;
	int idx;
	struct l_desc_struct desc;
	struct segment_descriptor sd;

	error = copyin(args->desc, &info, sizeof(struct l_user_desc));
	if (error)
		return (error);

	idx = info.entry_number;
	/* XXX: I am not sure if we want 3 to be allowed too. */
	if (idx != 6 && idx != 3)
		return (EINVAL);

	idx = 3;

	memset(&info, 0, sizeof(info));

	sd = PCPU_GET(fsgs_gdt)[1];

	memcpy(&desc, &sd, sizeof(desc));

	info.entry_number = idx;
	info.base_addr = LINUX_GET_BASE(&desc);
	info.limit = LINUX_GET_LIMIT(&desc);
	info.seg_32bit = LINUX_GET_32BIT(&desc);
	info.contents = LINUX_GET_CONTENTS(&desc);
	info.read_exec_only = !LINUX_GET_WRITABLE(&desc);
	info.limit_in_pages = LINUX_GET_LIMIT_PAGES(&desc);
	info.seg_not_present = !LINUX_GET_PRESENT(&desc);
	info.useable = LINUX_GET_USEABLE(&desc);

	error = copyout(&info, args->desc, sizeof(struct l_user_desc));
	if (error)
		return (EFAULT);

	return (0);
}

/* XXX: this won't work with a module - convert it */
int
linux_mq_open(struct thread *td, struct linux_mq_open_args *args)
{
#ifdef P1003_1B_MQUEUE
	return (sys_kmq_open(td, (struct kmq_open_args *)args));
#else
	return (ENOSYS);
#endif
}

int
linux_mq_unlink(struct thread *td, struct linux_mq_unlink_args *args)
{
#ifdef P1003_1B_MQUEUE
	return (sys_kmq_unlink(td, (struct kmq_unlink_args *)args));
#else
	return (ENOSYS);
#endif
}

int
linux_mq_timedsend(struct thread *td, struct linux_mq_timedsend_args *args)
{
#ifdef P1003_1B_MQUEUE
	return (sys_kmq_timedsend(td, (struct kmq_timedsend_args *)args));
#else
	return (ENOSYS);
#endif
}

int
linux_mq_timedreceive(struct thread *td, struct linux_mq_timedreceive_args *args)
{
#ifdef P1003_1B_MQUEUE
	return (sys_kmq_timedreceive(td, (struct kmq_timedreceive_args *)args));
#else
	return (ENOSYS);
#endif
}

int
linux_mq_notify(struct thread *td, struct linux_mq_notify_args *args)
{
#ifdef P1003_1B_MQUEUE
	return (sys_kmq_notify(td, (struct kmq_notify_args *)args));
#else
	return (ENOSYS);
#endif
}

int
linux_mq_getsetattr(struct thread *td, struct linux_mq_getsetattr_args *args)
{
#ifdef P1003_1B_MQUEUE
	return (sys_kmq_setattr(td, (struct kmq_setattr_args *)args));
#else
	return (ENOSYS);
#endif
}

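/*
 * Translate a FreeBSD register set into the Linux i386 pt_regs layout;
 * orig_eax is filled with the current %eax value.
 */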
void
bsd_to_linux_regset(const struct reg *b_reg,
    struct linux_pt_regset *l_regset)
{

	l_regset->ebx = b_reg->r_ebx;
	l_regset->ecx = b_reg->r_ecx;
	l_regset->edx = b_reg->r_edx;
	l_regset->esi = b_reg->r_esi;
	l_regset->edi = b_reg->r_edi;
	l_regset->ebp = b_reg->r_ebp;
	l_regset->eax = b_reg->r_eax;
	l_regset->ds = b_reg->r_ds;
	l_regset->es = b_reg->r_es;
	l_regset->fs = b_reg->r_fs;
	l_regset->gs = b_reg->r_gs;
	l_regset->orig_eax = b_reg->r_eax;
	l_regset->eip = b_reg->r_eip;
	l_regset->cs = b_reg->r_cs;
	l_regset->eflags = b_reg->r_eflags;
	l_regset->esp = b_reg->r_esp;
	l_regset->ss = b_reg->r_ss;
}

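/*
 * uselib(2): map an a.out (QMAGIC/ZMAGIC) shared library into the calling
 * process, with permission checks similar to exec(2), mapping text+data
 * and allocating zero-filled BSS.
 */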
int
linux_uselib(struct thread *td, struct linux_uselib_args *args)
{
	struct nameidata ni;
	struct vnode *vp;
	struct exec *a_out;
	vm_map_t map;
	vm_map_entry_t entry;
	struct vattr attr;
	vm_offset_t vmaddr;
	unsigned long file_offset;
	unsigned long bss_size;
	ssize_t aresid;
	int error;
	bool locked, opened, textset;

	a_out = NULL;
	vp = NULL;
	locked = false;
	textset = false;
	opened = false;

	NDINIT(&ni, LOOKUP, ISOPEN | FOLLOW | LOCKLEAF | AUDITVNODE1,
	    UIO_USERSPACE, args->library);
	error = namei(&ni);
	if (error)
		goto cleanup;

	vp = ni.ni_vp;
	NDFREE_PNBUF(&ni);

	/*
	 * From here on down, we have a locked vnode that must be unlocked.
	 * XXX: The code below largely duplicates exec_check_permissions().
	 */
	locked = true;

	/* Executable? */
	error = VOP_GETATTR(vp, &attr, td->td_ucred);
	if (error)
		goto cleanup;

	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) {
		/* EACCES is what exec(2) returns. */
		error = ENOEXEC;
		goto cleanup;
	}

	/* Sensible size? */
	if (attr.va_size == 0) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* Can we access it? */
	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
	if (error)
		goto cleanup;

	/*
	 * XXX: This should use vn_open() so that it is properly authorized,
	 * and to reduce code redundancy all over the place here.
	 * XXX: Not really, it duplicates far more of exec_check_permissions()
	 * than vn_open().
	 */
#ifdef MAC
	error = mac_vnode_check_open(td->td_ucred, vp, VREAD);
	if (error)
		goto cleanup;
#endif
	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
	if (error)
		goto cleanup;
	opened = true;

	/* Pull in executable header into exec_map */
	error = vm_mmap(exec_map, (vm_offset_t *)&a_out, PAGE_SIZE,
	    VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE, vp, 0);
	if (error)
		goto cleanup;

	/* Is it a Linux binary? */
	if (((a_out->a_magic >> 16) & 0xff) != 0x64) {
		error = ENOEXEC;
		goto cleanup;
	}

	/*
	 * While we are here, we should REALLY do some more checks
	 */

	/* Set file/virtual offset based on a.out variant. */
	switch ((int)(a_out->a_magic & 0xffff)) {
	case 0413:			/* ZMAGIC */
		file_offset = 1024;
		break;
	case 0314:			/* QMAGIC */
		file_offset = 0;
		break;
	default:
		error = ENOEXEC;
		goto cleanup;
	}

	bss_size = round_page(a_out->a_bss);

	/* Check various fields in header for validity/bounds. */
	if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* text + data can't exceed file size */
	if (a_out->a_data + a_out->a_text > attr.va_size) {
		error = EFAULT;
		goto cleanup;
	}

	/*
	 * text/data/bss must not exceed limits
	 * XXX - this is not complete.  It should check current usage PLUS
	 * the resources needed by this library.
	 */
	PROC_LOCK(td->td_proc);
	if (a_out->a_text > maxtsiz ||
	    a_out->a_data + bss_size > lim_cur_proc(td->td_proc, RLIMIT_DATA) ||
	    racct_set(td->td_proc, RACCT_DATA, a_out->a_data +
	    bss_size) != 0) {
		PROC_UNLOCK(td->td_proc);
		error = ENOMEM;
		goto cleanup;
	}
	PROC_UNLOCK(td->td_proc);

	/*
	 * Prevent more writers.
	 */
	error = VOP_SET_TEXT(vp);
	if (error != 0)
		goto cleanup;
	textset = true;

	/*
	 * Lock no longer needed
	 */
	locked = false;
	VOP_UNLOCK(vp);

	/*
	 * Check if file_offset is page aligned.  Currently we cannot handle
	 * misaligned file offsets, and so we read in the entire image
	 * (what a waste).
	 */
	if (file_offset & PAGE_MASK) {
		/* Map text+data read/write/execute */

		/* a_entry is the load address and is page aligned */
		vmaddr = trunc_page(a_out->a_entry);

		/* get anon user mapping, read+write+execute */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, a_out->a_text + a_out->a_data, 0, VMFS_NO_SPACE,
		    VM_PROT_ALL, VM_PROT_ALL, 0);
		if (error)
			goto cleanup;

		error = vn_rdwr(UIO_READ, vp, (void *)vmaddr, file_offset,
		    a_out->a_text + a_out->a_data, UIO_USERSPACE, 0,
		    td->td_ucred, NOCRED, &aresid, td);
		if (error != 0)
			goto cleanup;
		if (aresid != 0) {
			error = ENOEXEC;
			goto cleanup;
		}
	} else {
		/*
		 * for QMAGIC, a_entry is 20 bytes beyond the load address
		 * to skip the executable header
		 */
		vmaddr = trunc_page(a_out->a_entry);

		/*
		 * Map it all into the process's space as a single
		 * copy-on-write "data" segment.
		 */
		map = &td->td_proc->p_vmspace->vm_map;
		error = vm_mmap(map, &vmaddr,
		    a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL,
		    MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, file_offset);
		if (error)
			goto cleanup;
		vm_map_lock(map);
		if (!vm_map_lookup_entry(map, vmaddr, &entry)) {
			vm_map_unlock(map);
			error = EDOOFUS;
			goto cleanup;
		}
		entry->eflags |= MAP_ENTRY_VN_EXEC;
		vm_map_unlock(map);
		textset = false;
	}

	if (bss_size != 0) {
		/* Calculate BSS start address */
		vmaddr = trunc_page(a_out->a_entry) + a_out->a_text +
		    a_out->a_data;

		/* allocate some 'anon' space */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, bss_size, 0, VMFS_NO_SPACE, VM_PROT_ALL,
		    VM_PROT_ALL, 0);
		if (error)
			goto cleanup;
	}

cleanup:
	if (opened) {
		if (locked)
			VOP_UNLOCK(vp);
		locked = false;
		VOP_CLOSE(vp, FREAD, td->td_ucred, td);
	}
	if (textset) {
		if (!locked) {
			locked = true;
			VOP_LOCK(vp, LK_SHARED | LK_RETRY);
		}
		VOP_UNSET_TEXT_CHECKED(vp);
	}
	if (locked)
		VOP_UNLOCK(vp);

	/* Release the temporary mapping. */
	if (a_out)
		kmap_free_wakeup(exec_map, (vm_offset_t)a_out, PAGE_SIZE);

	return (error);
}