xref: /freebsd/sys/i386/linux/linux_machdep.c (revision d0b2dbfa)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2000 Marcel Moolenaar
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 #include "opt_posix.h"
31 
32 #include <sys/param.h>
33 #include <sys/imgact_aout.h>
34 #include <sys/fcntl.h>
35 #include <sys/lock.h>
36 #include <sys/malloc.h>
37 #include <sys/mman.h>
38 #include <sys/mutex.h>
39 #include <sys/namei.h>
40 #include <sys/priv.h>
41 #include <sys/proc.h>
42 #include <sys/racct.h>
43 #include <sys/resource.h>
44 #include <sys/resourcevar.h>
45 #include <sys/syscallsubr.h>
46 #include <sys/sysproto.h>
47 #include <sys/vnode.h>
48 
49 #include <security/audit/audit.h>
50 #include <security/mac/mac_framework.h>
51 
52 #include <machine/frame.h>
53 #include <machine/pcb.h>			/* needed for pcb definition in linux_set_thread_area */
54 #include <machine/psl.h>
55 #include <machine/segments.h>
56 #include <machine/sysarch.h>
57 
58 #include <vm/pmap.h>
59 #include <vm/vm.h>
60 #include <vm/vm_extern.h>
61 #include <vm/vm_kern.h>
62 #include <vm/vm_map.h>
63 #include <vm/vm_param.h>
64 
65 #include <x86/reg.h>
66 
67 #include <i386/linux/linux.h>
68 #include <i386/linux/linux_proto.h>
69 #include <compat/linux/linux_emul.h>
70 #include <compat/linux/linux_fork.h>
71 #include <compat/linux/linux_ipc.h>
72 #include <compat/linux/linux_misc.h>
73 #include <compat/linux/linux_mmap.h>
74 #include <compat/linux/linux_signal.h>
75 #include <compat/linux/linux_util.h>
76 
77 
78 struct l_descriptor {
79 	l_uint		entry_number;
80 	l_ulong		base_addr;
81 	l_uint		limit;
82 	l_uint		seg_32bit:1;
83 	l_uint		contents:2;
84 	l_uint		read_exec_only:1;
85 	l_uint		limit_in_pages:1;
86 	l_uint		seg_not_present:1;
87 	l_uint		useable:1;
88 };
89 
90 struct l_old_select_argv {
91 	l_int		nfds;
92 	l_fd_set	*readfds;
93 	l_fd_set	*writefds;
94 	l_fd_set	*exceptfds;
95 	struct l_timeval	*timeout;
96 };
97 
98 struct l_ipc_kludge {
99 	struct l_msgbuf *msgp;
100 	l_long msgtyp;
101 };
102 
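/*
 * Demultiplex the old System V IPC entry point, ipc(2): the low 16 bits
 * of "what" select the operation and the upper 16 bits carry the IPC
 * call version, which some operations (e.g. MSGRCV) use to pick an
 * argument layout.  Illustrative only (not lifted from any particular
 * libc): a Linux libc historically implements semop() roughly as
 *
 *	ipc(SEMOP, semid, nsops, 0, sops);
 *
 * which arrives here with (what & 0xFFFF) == LINUX_SEMOP and the
 * semaphore operation buffer in "ptr".
 */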
103 int
104 linux_ipc(struct thread *td, struct linux_ipc_args *args)
105 {
106 
107 	switch (args->what & 0xFFFF) {
108 	case LINUX_SEMOP: {
109 
110 		return (kern_semop(td, args->arg1, PTRIN(args->ptr),
111 		    args->arg2, NULL));
112 	}
113 	case LINUX_SEMGET: {
114 		struct linux_semget_args a;
115 
116 		a.key = args->arg1;
117 		a.nsems = args->arg2;
118 		a.semflg = args->arg3;
119 		return (linux_semget(td, &a));
120 	}
121 	case LINUX_SEMCTL: {
122 		struct linux_semctl_args a;
123 		int error;
124 
125 		a.semid = args->arg1;
126 		a.semnum = args->arg2;
127 		a.cmd = args->arg3;
128 		error = copyin(PTRIN(args->ptr), &a.arg, sizeof(a.arg));
129 		if (error)
130 			return (error);
131 		return (linux_semctl(td, &a));
132 	}
133 	case LINUX_SEMTIMEDOP: {
134 		struct linux_semtimedop_args a;
135 
136 		a.semid = args->arg1;
137 		a.tsops = PTRIN(args->ptr);
138 		a.nsops = args->arg2;
139 		a.timeout = PTRIN(args->arg5);
140 		return (linux_semtimedop(td, &a));
141 	}
142 	case LINUX_MSGSND: {
143 		struct linux_msgsnd_args a;
144 
145 		a.msqid = args->arg1;
146 		a.msgp = PTRIN(args->ptr);
147 		a.msgsz = args->arg2;
148 		a.msgflg = args->arg3;
149 		return (linux_msgsnd(td, &a));
150 	}
151 	case LINUX_MSGRCV: {
152 		struct linux_msgrcv_args a;
153 
154 		a.msqid = args->arg1;
155 		a.msgsz = args->arg2;
156 		a.msgflg = args->arg3;
157 		if ((args->what >> 16) == 0) {
158 			struct l_ipc_kludge tmp;
159 			int error;
160 
161 			if (args->ptr == 0)
162 				return (EINVAL);
163 			error = copyin(PTRIN(args->ptr), &tmp, sizeof(tmp));
164 			if (error)
165 				return (error);
166 			a.msgp = PTRIN(tmp.msgp);
167 			a.msgtyp = tmp.msgtyp;
168 		} else {
169 			a.msgp = PTRIN(args->ptr);
170 			a.msgtyp = args->arg5;
171 		}
172 		return (linux_msgrcv(td, &a));
173 	}
174 	case LINUX_MSGGET: {
175 		struct linux_msgget_args a;
176 
177 		a.key = args->arg1;
178 		a.msgflg = args->arg2;
179 		return (linux_msgget(td, &a));
180 	}
181 	case LINUX_MSGCTL: {
182 		struct linux_msgctl_args a;
183 
184 		a.msqid = args->arg1;
185 		a.cmd = args->arg2;
186 		a.buf = PTRIN(args->ptr);
187 		return (linux_msgctl(td, &a));
188 	}
189 	case LINUX_SHMAT: {
190 		struct linux_shmat_args a;
191 		l_uintptr_t addr;
192 		int error;
193 
194 		a.shmid = args->arg1;
195 		a.shmaddr = PTRIN(args->ptr);
196 		a.shmflg = args->arg2;
197 		error = linux_shmat(td, &a);
198 		if (error != 0)
199 			return (error);
200 		addr = td->td_retval[0];
201 		error = copyout(&addr, PTRIN(args->arg3), sizeof(addr));
202 		td->td_retval[0] = 0;
203 		return (error);
204 	}
205 	case LINUX_SHMDT: {
206 		struct linux_shmdt_args a;
207 
208 		a.shmaddr = PTRIN(args->ptr);
209 		return (linux_shmdt(td, &a));
210 	}
211 	case LINUX_SHMGET: {
212 		struct linux_shmget_args a;
213 
214 		a.key = args->arg1;
215 		a.size = args->arg2;
216 		a.shmflg = args->arg3;
217 		return (linux_shmget(td, &a));
218 	}
219 	case LINUX_SHMCTL: {
220 		struct linux_shmctl_args a;
221 
222 		a.shmid = args->arg1;
223 		a.cmd = args->arg2;
224 		a.buf = PTRIN(args->ptr);
225 		return (linux_shmctl(td, &a));
226 	}
227 	default:
228 		break;
229 	}
230 
231 	return (EINVAL);
232 }
233 
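/*
 * The old i386 select(2) takes a single pointer to a structure holding
 * all five arguments; unpack it and hand off to linux_select().
 */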
234 int
235 linux_old_select(struct thread *td, struct linux_old_select_args *args)
236 {
237 	struct l_old_select_argv linux_args;
238 	struct linux_select_args newsel;
239 	int error;
240 
241 	error = copyin(args->ptr, &linux_args, sizeof(linux_args));
242 	if (error)
243 		return (error);
244 
245 	newsel.nfds = linux_args.nfds;
246 	newsel.readfds = linux_args.readfds;
247 	newsel.writefds = linux_args.writefds;
248 	newsel.exceptfds = linux_args.exceptfds;
249 	newsel.timeout = linux_args.timeout;
250 	return (linux_select(td, &newsel));
251 }
252 
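/*
 * Load the TLS descriptor for a newly cloned thread.  This is the
 * clone(2) CLONE_SETTLS path; "desc" points at a Linux struct user_desc
 * supplied by the caller.
 */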
253 int
254 linux_set_cloned_tls(struct thread *td, void *desc)
255 {
256 	struct segment_descriptor sd;
257 	struct l_user_desc info;
258 	int idx, error;
259 	int a[2];
260 
261 	error = copyin(desc, &info, sizeof(struct l_user_desc));
262 	if (error) {
263 		linux_msg(td, "set_cloned_tls copyin failed!");
264 	} else {
265 		idx = info.entry_number;
266 
267 		/*
268 		 * We should be getting back the index we returned from
269 		 * the set_thread_area() syscall.
270 		 */
271 		if (idx != 6 && idx != 3) {
272 			linux_msg(td, "set_cloned_tls resetting idx!");
273 			idx = 3;
274 		}
275 
276 		/* This doesn't happen in practice. */
277 		if (idx == 6) {
278 			/* Copy the entry_number back out as 3, the slot we actually use. */
279 			info.entry_number = 3;
280 			error = copyout(&info, desc, sizeof(struct l_user_desc));
281 			if (error)
282 				linux_msg(td, "set_cloned_tls copyout failed!");
283 		}
284 
285 		a[0] = LINUX_LDT_entry_a(&info);
286 		a[1] = LINUX_LDT_entry_b(&info);
287 
288 		memcpy(&sd, &a, sizeof(a));
289 		/* set %gs */
290 		td->td_pcb->pcb_gsd = sd;
291 		td->td_pcb->pcb_gs = GSEL(GUGS_SEL, SEL_UPL);
292 	}
293 
294 	return (error);
295 }
296 
297 int
298 linux_set_upcall(struct thread *td, register_t stack)
299 {
300 
301 	if (stack)
302 		td->td_frame->tf_esp = stack;
303 
304 	/*
305 	 * The newly created Linux thread returns to user space along the
306 	 * same path as its parent does.
307 	 */
308 	td->td_frame->tf_eax = 0;
309 	return (0);
310 }
311 
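/*
 * mmap2(2) passes the file offset in 4096-byte units rather than in
 * bytes, which is how a 32-bit argument can address offsets beyond 4GB;
 * hence the scaling by PAGE_SIZE below.  For example, pgoff == 0x100000
 * corresponds to a byte offset of 4GB.  The older mmap(2) wrapper that
 * follows takes a plain byte offset instead.
 */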
312 int
313 linux_mmap2(struct thread *td, struct linux_mmap2_args *args)
314 {
315 
316 	return (linux_mmap_common(td, args->addr, args->len, args->prot,
317 		args->flags, args->fd, (uint64_t)(uint32_t)args->pgoff *
318 		PAGE_SIZE));
319 }
320 
321 int
322 linux_mmap(struct thread *td, struct linux_mmap_args *args)
323 {
324 	int error;
325 	struct l_mmap_argv linux_args;
326 
327 	error = copyin(args->ptr, &linux_args, sizeof(linux_args));
328 	if (error)
329 		return (error);
330 
331 	return (linux_mmap_common(td, linux_args.addr, linux_args.len,
332 	    linux_args.prot, linux_args.flags, linux_args.fd,
333 	    (uint32_t)linux_args.pgoff));
334 }
335 
336 int
337 linux_mprotect(struct thread *td, struct linux_mprotect_args *uap)
338 {
339 
340 	return (linux_mprotect_common(td, PTROUT(uap->addr), uap->len, uap->prot));
341 }
342 
343 int
344 linux_madvise(struct thread *td, struct linux_madvise_args *uap)
345 {
346 
347 	return (linux_madvise_common(td, PTROUT(uap->addr), uap->len, uap->behav));
348 }
349 
350 int
351 linux_ioperm(struct thread *td, struct linux_ioperm_args *args)
352 {
353 	int error;
354 	struct i386_ioperm_args iia;
355 
356 	iia.start = args->start;
357 	iia.length = args->length;
358 	iia.enable = args->enable;
359 	error = i386_set_ioperm(td, &iia);
360 	return (error);
361 }
362 
363 int
364 linux_iopl(struct thread *td, struct linux_iopl_args *args)
365 {
366 	int error;
367 
368 	if (args->level < 0 || args->level > 3)
369 		return (EINVAL);
370 	if ((error = priv_check(td, PRIV_IO)) != 0)
371 		return (error);
372 	if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
373 		return (error);
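	/*
	 * PSL_IOPL is the two-bit I/O privilege level field (bits 12 and 13
	 * of %eflags); multiplying the requested level (0-3) by PSL_IOPL / 3
	 * places that level in the field.
	 */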
374 	td->td_frame->tf_eflags = (td->td_frame->tf_eflags & ~PSL_IOPL) |
375 	    (args->level * (PSL_IOPL / 3));
376 	return (0);
377 }
378 
379 int
380 linux_modify_ldt(struct thread *td, struct linux_modify_ldt_args *uap)
381 {
382 	int error;
383 	struct i386_ldt_args ldt;
384 	struct l_descriptor ld;
385 	union descriptor desc;
386 	int size, written;
387 
388 	switch (uap->func) {
389 	case 0x00: /* read_ldt */
390 		ldt.start = 0;
391 		ldt.descs = uap->ptr;
392 		ldt.num = uap->bytecount / sizeof(union descriptor);
393 		error = i386_get_ldt(td, &ldt);
394 		td->td_retval[0] *= sizeof(union descriptor);
395 		break;
396 	case 0x02: /* read_default_ldt = 0 */
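		/*
		 * Linux reports the default LDT as zero-filled descriptors;
		 * emulate that by writing out up to five zeroed entries.
		 */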
397 		size = 5*sizeof(struct l_desc_struct);
398 		if (size > uap->bytecount)
399 			size = uap->bytecount;
400 		for (written = error = 0; written < size && error == 0; written++)
401 			error = subyte((char *)uap->ptr + written, 0);
402 		td->td_retval[0] = written;
403 		break;
404 	case 0x01: /* write_ldt */
405 	case 0x11: /* write_ldt */
406 		if (uap->bytecount != sizeof(ld))
407 			return (EINVAL);
408 
409 		error = copyin(uap->ptr, &ld, sizeof(ld));
410 		if (error)
411 			return (error);
412 
413 		ldt.start = ld.entry_number;
414 		ldt.descs = &desc;
415 		ldt.num = 1;
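		/*
		 * Repack the Linux descriptor fields into a native x86
		 * segment descriptor for i386_set_ldt().
		 */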
416 		desc.sd.sd_lolimit = (ld.limit & 0x0000ffff);
417 		desc.sd.sd_hilimit = (ld.limit & 0x000f0000) >> 16;
418 		desc.sd.sd_lobase = (ld.base_addr & 0x00ffffff);
419 		desc.sd.sd_hibase = (ld.base_addr & 0xff000000) >> 24;
420 		desc.sd.sd_type = SDT_MEMRO | ((ld.read_exec_only ^ 1) << 1) |
421 			(ld.contents << 2);
422 		desc.sd.sd_dpl = 3;
423 		desc.sd.sd_p = (ld.seg_not_present ^ 1);
424 		desc.sd.sd_xx = 0;
425 		desc.sd.sd_def32 = ld.seg_32bit;
426 		desc.sd.sd_gran = ld.limit_in_pages;
427 		error = i386_set_ldt(td, &ldt, &desc);
428 		break;
429 	default:
430 		error = ENOSYS;
431 		break;
432 	}
433 
434 	if (error == EOPNOTSUPP) {
435 		linux_msg(td, "modify_ldt needs kernel option USER_LDT");
436 		error = ENOSYS;
437 	}
438 
439 	return (error);
440 }
441 
442 int
443 linux_sigaction(struct thread *td, struct linux_sigaction_args *args)
444 {
445 	l_osigaction_t osa;
446 	l_sigaction_t act, oact;
447 	int error;
448 
449 	if (args->nsa != NULL) {
450 		error = copyin(args->nsa, &osa, sizeof(l_osigaction_t));
451 		if (error)
452 			return (error);
453 		act.lsa_handler = osa.lsa_handler;
454 		act.lsa_flags = osa.lsa_flags;
455 		act.lsa_restorer = osa.lsa_restorer;
456 		LINUX_SIGEMPTYSET(act.lsa_mask);
457 		act.lsa_mask.__mask = osa.lsa_mask;
458 	}
459 
460 	error = linux_do_sigaction(td, args->sig, args->nsa ? &act : NULL,
461 	    args->osa ? &oact : NULL);
462 
463 	if (args->osa != NULL && !error) {
464 		osa.lsa_handler = oact.lsa_handler;
465 		osa.lsa_flags = oact.lsa_flags;
466 		osa.lsa_restorer = oact.lsa_restorer;
467 		osa.lsa_mask = oact.lsa_mask.__mask;
468 		error = copyout(&osa, args->osa, sizeof(l_osigaction_t));
469 	}
470 
471 	return (error);
472 }
473 
474 /*
475  * Linux has two extra args, restart and oldmask.  We don't use these,
476  * but it seems that "restart" is actually a context pointer that
477  * enables the signal to happen with a different register set.
478  */
479 int
480 linux_sigsuspend(struct thread *td, struct linux_sigsuspend_args *args)
481 {
482 	sigset_t sigmask;
483 	l_sigset_t mask;
484 
485 	LINUX_SIGEMPTYSET(mask);
486 	mask.__mask = args->mask;
487 	linux_to_bsd_sigset(&mask, &sigmask);
488 	return (kern_sigsuspend(td, sigmask));
489 }
490 
491 int
492 linux_pause(struct thread *td, struct linux_pause_args *args)
493 {
494 	struct proc *p = td->td_proc;
495 	sigset_t sigmask;
496 
497 	PROC_LOCK(p);
498 	sigmask = td->td_sigmask;
499 	PROC_UNLOCK(p);
500 	return (kern_sigsuspend(td, sigmask));
501 }
502 
503 int
504 linux_set_thread_area(struct thread *td, struct linux_set_thread_area_args *args)
505 {
506 	struct l_user_desc info;
507 	int error;
508 	int idx;
509 	int a[2];
510 	struct segment_descriptor sd;
511 
512 	error = copyin(args->desc, &info, sizeof(struct l_user_desc));
513 	if (error)
514 		return (error);
515 
516 	idx = info.entry_number;
517 	/*
518 	 * Semantics of the Linux version: every thread in the system has an
519 	 * array of three TLS descriptors: the first is used by glibc, the
520 	 * second by Wine, and the third is unknown.  This syscall loads the
521 	 * selected TLS descriptor with a value and also loads GDT descriptors
522 	 * 6, 7 and 8 with the contents of the per-thread descriptors.
523 	 *
524 	 * Semantics of FreeBSD version: I think we can ignore that Linux has 3
525 	 * per-thread descriptors and use just the 1st one. The tls_array[]
526 	 * is used only in set/get-thread_area() syscalls and for loading the
527 	 * GDT descriptors. In FreeBSD we use just one GDT descriptor for TLS
528 	 * so we will load just one.
529 	 *
530 	 * XXX: this doesn't work when a user space process tries to use more
531 	 * than 1 TLS segment. Comment in the Linux sources says wine might do
532 	 * this.
533 	 */
534 
535 	/*
536 	 * We support only the glibc TLS slot for now.  Index 3 has to be
537 	 * accepted as well because that is the slot we hand back, so a
538 	 * second call made with the returned descriptor still succeeds.
539 	 */
540 	if (idx != 6 && idx != -1 && idx != 3)
541 		return (EINVAL);
542 
543 	/*
544 	 * We have to copy out the GDT entry we actually use.  FreeBSD uses
545 	 * GDT entry #3 for storing %gs, so report and load that one.
546 	 *
547 	 * XXX: what if a user space program doesn't check this value and tries
548 	 * to use 6, 7 or 8?
549 	 */
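	/*
	 * Illustrative only (not taken from any particular libc): a
	 * well-behaved caller reads the slot back and builds its %gs
	 * selector from it, roughly
	 *
	 *	set_thread_area(&desc);
	 *	selector = (desc.entry_number << 3) | 3;	(GDT, RPL 3)
	 *
	 * so reporting 3 here keeps such callers consistent with the GDT
	 * slot we actually load below.
	 */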
550 	idx = info.entry_number = 3;
551 	error = copyout(&info, args->desc, sizeof(struct l_user_desc));
552 	if (error)
553 		return (error);
554 
555 	if (LINUX_LDT_empty(&info)) {
556 		a[0] = 0;
557 		a[1] = 0;
558 	} else {
559 		a[0] = LINUX_LDT_entry_a(&info);
560 		a[1] = LINUX_LDT_entry_b(&info);
561 	}
562 
563 	memcpy(&sd, &a, sizeof(a));
564 	/* This is taken from the i386 version of cpu_set_user_tls(). */
565 	critical_enter();
566 	/* set %gs */
567 	td->td_pcb->pcb_gsd = sd;
568 	PCPU_GET(fsgs_gdt)[1] = sd;
569 	load_gs(GSEL(GUGS_SEL, SEL_UPL));
570 	critical_exit();
571 
572 	return (0);
573 }
574 
575 int
576 linux_get_thread_area(struct thread *td, struct linux_get_thread_area_args *args)
577 {
578 
579 	struct l_user_desc info;
580 	int error;
581 	int idx;
582 	struct l_desc_struct desc;
583 	struct segment_descriptor sd;
584 
585 	error = copyin(args->desc, &info, sizeof(struct l_user_desc));
586 	if (error)
587 		return (error);
588 
589 	idx = info.entry_number;
590 	/* XXX: I am not sure if we want 3 to be allowed too. */
591 	if (idx != 6 && idx != 3)
592 		return (EINVAL);
593 
594 	idx = 3;
595 
596 	memset(&info, 0, sizeof(info));
597 
598 	sd = PCPU_GET(fsgs_gdt)[1];
599 
600 	memcpy(&desc, &sd, sizeof(desc));
601 
602 	info.entry_number = idx;
603 	info.base_addr = LINUX_GET_BASE(&desc);
604 	info.limit = LINUX_GET_LIMIT(&desc);
605 	info.seg_32bit = LINUX_GET_32BIT(&desc);
606 	info.contents = LINUX_GET_CONTENTS(&desc);
607 	info.read_exec_only = !LINUX_GET_WRITABLE(&desc);
608 	info.limit_in_pages = LINUX_GET_LIMIT_PAGES(&desc);
609 	info.seg_not_present = !LINUX_GET_PRESENT(&desc);
610 	info.useable = LINUX_GET_USEABLE(&desc);
611 
612 	error = copyout(&info, args->desc, sizeof(struct l_user_desc));
613 	if (error)
614 		return (EFAULT);
615 
616 	return (0);
617 }
618 
619 /* XXX: this won't work with a module - convert it. */
620 int
621 linux_mq_open(struct thread *td, struct linux_mq_open_args *args)
622 {
623 #ifdef P1003_1B_MQUEUE
624 	return (sys_kmq_open(td, (struct kmq_open_args *)args));
625 #else
626 	return (ENOSYS);
627 #endif
628 }
629 
630 int
631 linux_mq_unlink(struct thread *td, struct linux_mq_unlink_args *args)
632 {
633 #ifdef P1003_1B_MQUEUE
634 	return (sys_kmq_unlink(td, (struct kmq_unlink_args *)args));
635 #else
636 	return (ENOSYS);
637 #endif
638 }
639 
640 int
641 linux_mq_timedsend(struct thread *td, struct linux_mq_timedsend_args *args)
642 {
643 #ifdef P1003_1B_MQUEUE
644 	return (sys_kmq_timedsend(td, (struct kmq_timedsend_args *)args));
645 #else
646 	return (ENOSYS);
647 #endif
648 }
649 
650 int
651 linux_mq_timedreceive(struct thread *td, struct linux_mq_timedreceive_args *args)
652 {
653 #ifdef P1003_1B_MQUEUE
654 	return (sys_kmq_timedreceive(td, (struct kmq_timedreceive_args *)args));
655 #else
656 	return (ENOSYS);
657 #endif
658 }
659 
660 int
661 linux_mq_notify(struct thread *td, struct linux_mq_notify_args *args)
662 {
663 #ifdef P1003_1B_MQUEUE
664 	return (sys_kmq_notify(td, (struct kmq_notify_args *)args));
665 #else
666 	return (ENOSYS);
667 #endif
668 }
669 
670 int
671 linux_mq_getsetattr(struct thread *td, struct linux_mq_getsetattr_args *args)
672 {
673 #ifdef P1003_1B_MQUEUE
674 	return (sys_kmq_setattr(td, (struct kmq_setattr_args *)args));
675 #else
676 	return (ENOSYS);
677 #endif
678 }
679 
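/*
 * Translate a FreeBSD register set into the Linux ptrace register layout
 * (struct pt_regs), as used by consumers such as ptrace() and core dumps.
 */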
680 void
681 bsd_to_linux_regset(const struct reg *b_reg,
682     struct linux_pt_regset *l_regset)
683 {
684 
685 	l_regset->ebx = b_reg->r_ebx;
686 	l_regset->ecx = b_reg->r_ecx;
687 	l_regset->edx = b_reg->r_edx;
688 	l_regset->esi = b_reg->r_esi;
689 	l_regset->edi = b_reg->r_edi;
690 	l_regset->ebp = b_reg->r_ebp;
691 	l_regset->eax = b_reg->r_eax;
692 	l_regset->ds = b_reg->r_ds;
693 	l_regset->es = b_reg->r_es;
694 	l_regset->fs = b_reg->r_fs;
695 	l_regset->gs = b_reg->r_gs;
696 	l_regset->orig_eax = b_reg->r_eax;
697 	l_regset->eip = b_reg->r_eip;
698 	l_regset->cs = b_reg->r_cs;
699 	l_regset->eflags = b_reg->r_eflags;
700 	l_regset->esp = b_reg->r_esp;
701 	l_regset->ss = b_reg->r_ss;
702 }
703 
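/*
 * uselib(2): load an old-style Linux a.out shared library into the
 * calling process.  The library's text and data are mapped (or read in,
 * if the file offset is not page aligned) at the address encoded in the
 * a.out header, and zero-filled anonymous memory is allocated for its
 * bss.
 */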
704 int
705 linux_uselib(struct thread *td, struct linux_uselib_args *args)
706 {
707 	struct nameidata ni;
708 	struct vnode *vp;
709 	struct exec *a_out;
710 	vm_map_t map;
711 	vm_map_entry_t entry;
712 	struct vattr attr;
713 	vm_offset_t vmaddr;
714 	unsigned long file_offset;
715 	unsigned long bss_size;
716 	ssize_t aresid;
717 	int error;
718 	bool locked, opened, textset;
719 
720 	a_out = NULL;
721 	vp = NULL;
722 	locked = false;
723 	textset = false;
724 	opened = false;
725 
726 	NDINIT(&ni, LOOKUP, ISOPEN | FOLLOW | LOCKLEAF | AUDITVNODE1,
727 	    UIO_USERSPACE, args->library);
728 	error = namei(&ni);
729 	if (error)
730 		goto cleanup;
731 
732 	vp = ni.ni_vp;
733 	NDFREE_PNBUF(&ni);
734 
735 	/*
736 	 * From here on down, we have a locked vnode that must be unlocked.
737 	 * XXX: The code below largely duplicates exec_check_permissions().
738 	 */
739 	locked = true;
740 
741 	/* Executable? */
742 	error = VOP_GETATTR(vp, &attr, td->td_ucred);
743 	if (error)
744 		goto cleanup;
745 
746 	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
747 	    ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) {
748 		/* EACCES is what exec(2) returns. */
749 		error = ENOEXEC;
750 		goto cleanup;
751 	}
752 
753 	/* Sensible size? */
754 	if (attr.va_size == 0) {
755 		error = ENOEXEC;
756 		goto cleanup;
757 	}
758 
759 	/* Can we access it? */
760 	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
761 	if (error)
762 		goto cleanup;
763 
764 	/*
765 	 * XXX: This should use vn_open() so that it is properly authorized,
766 	 * and to reduce code redundancy all over the place here.
767 	 * XXX: Not really, it duplicates far more of exec_check_permissions()
768 	 * than vn_open().
769 	 */
770 #ifdef MAC
771 	error = mac_vnode_check_open(td->td_ucred, vp, VREAD);
772 	if (error)
773 		goto cleanup;
774 #endif
775 	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
776 	if (error)
777 		goto cleanup;
778 	opened = true;
779 
780 	/* Pull in executable header into exec_map */
781 	error = vm_mmap(exec_map, (vm_offset_t *)&a_out, PAGE_SIZE,
782 	    VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE, vp, 0);
783 	if (error)
784 		goto cleanup;
785 
786 	/* Is it a Linux binary? */
787 	if (((a_out->a_magic >> 16) & 0xff) != 0x64) {
788 		error = ENOEXEC;
789 		goto cleanup;
790 	}
791 
792 	/*
793 	 * While we are here, we should REALLY do some more checks
794 	 */
795 
796 	/* Set file/virtual offset based on a.out variant. */
797 	switch ((int)(a_out->a_magic & 0xffff)) {
798 	case 0413:			/* ZMAGIC */
799 		file_offset = 1024;
800 		break;
801 	case 0314:			/* QMAGIC */
802 		file_offset = 0;
803 		break;
804 	default:
805 		error = ENOEXEC;
806 		goto cleanup;
807 	}
808 
809 	bss_size = round_page(a_out->a_bss);
810 
811 	/* Check various fields in header for validity/bounds. */
812 	if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) {
813 		error = ENOEXEC;
814 		goto cleanup;
815 	}
816 
817 	/* text + data can't exceed file size */
818 	if (a_out->a_data + a_out->a_text > attr.va_size) {
819 		error = EFAULT;
820 		goto cleanup;
821 	}
822 
823 	/*
824 	 * Text, data, and bss must not exceed the limits.
825 	 * XXX - this is not complete.  It should check current usage PLUS
826 	 * the resources needed by this library.
827 	 */
828 	PROC_LOCK(td->td_proc);
829 	if (a_out->a_text > maxtsiz ||
830 	    a_out->a_data + bss_size > lim_cur_proc(td->td_proc, RLIMIT_DATA) ||
831 	    racct_set(td->td_proc, RACCT_DATA, a_out->a_data +
832 	    bss_size) != 0) {
833 		PROC_UNLOCK(td->td_proc);
834 		error = ENOMEM;
835 		goto cleanup;
836 	}
837 	PROC_UNLOCK(td->td_proc);
838 
839 	/*
840 	 * Prevent more writers.
841 	 */
842 	error = VOP_SET_TEXT(vp);
843 	if (error != 0)
844 		goto cleanup;
845 	textset = true;
846 
847 	/*
848 	 * Lock no longer needed
849 	 */
850 	locked = false;
851 	VOP_UNLOCK(vp);
852 
853 	/*
854 	 * Check whether file_offset is page aligned.  Currently we cannot
855 	 * handle misaligned file offsets, so we read in the entire image
856 	 * (what a waste).
857 	 */
858 	if (file_offset & PAGE_MASK) {
859 		/* Map text+data read/write/execute */
860 
861 		/* a_entry is the load address and is page aligned */
862 		vmaddr = trunc_page(a_out->a_entry);
863 
864 		/* get anon user mapping, read+write+execute */
865 		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
866 		    &vmaddr, a_out->a_text + a_out->a_data, 0, VMFS_NO_SPACE,
867 		    VM_PROT_ALL, VM_PROT_ALL, 0);
868 		if (error)
869 			goto cleanup;
870 
871 		error = vn_rdwr(UIO_READ, vp, (void *)vmaddr, file_offset,
872 		    a_out->a_text + a_out->a_data, UIO_USERSPACE, 0,
873 		    td->td_ucred, NOCRED, &aresid, td);
874 		if (error != 0)
875 			goto cleanup;
876 		if (aresid != 0) {
877 			error = ENOEXEC;
878 			goto cleanup;
879 		}
880 	} else {
881 		/*
882 		 * for QMAGIC, a_entry is 20 bytes beyond the load address
883 		 * to skip the executable header
884 		 */
885 		vmaddr = trunc_page(a_out->a_entry);
886 
887 		/*
888 		 * Map it all into the process's space as a single
889 		 * copy-on-write "data" segment.
890 		 */
891 		map = &td->td_proc->p_vmspace->vm_map;
892 		error = vm_mmap(map, &vmaddr,
893 		    a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL,
894 		    MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, file_offset);
895 		if (error)
896 			goto cleanup;
897 		vm_map_lock(map);
898 		if (!vm_map_lookup_entry(map, vmaddr, &entry)) {
899 			vm_map_unlock(map);
900 			error = EDOOFUS;
901 			goto cleanup;
902 		}
903 		entry->eflags |= MAP_ENTRY_VN_EXEC;
904 		vm_map_unlock(map);
905 		textset = false;
906 	}
907 
908 	if (bss_size != 0) {
909 		/* Calculate BSS start address */
910 		vmaddr = trunc_page(a_out->a_entry) + a_out->a_text +
911 		    a_out->a_data;
912 
913 		/* allocate some 'anon' space */
914 		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
915 		    &vmaddr, bss_size, 0, VMFS_NO_SPACE, VM_PROT_ALL,
916 		    VM_PROT_ALL, 0);
917 		if (error)
918 			goto cleanup;
919 	}
920 
921 cleanup:
922 	if (opened) {
923 		if (locked)
924 			VOP_UNLOCK(vp);
925 		locked = false;
926 		VOP_CLOSE(vp, FREAD, td->td_ucred, td);
927 	}
928 	if (textset) {
929 		if (!locked) {
930 			locked = true;
931 			VOP_LOCK(vp, LK_SHARED | LK_RETRY);
932 		}
933 		VOP_UNSET_TEXT_CHECKED(vp);
934 	}
935 	if (locked)
936 		VOP_UNLOCK(vp);
937 
938 	/* Release the temporary mapping. */
939 	if (a_out)
940 		kmap_free_wakeup(exec_map, (vm_offset_t)a_out, PAGE_SIZE);
941 
942 	return (error);
943 }
944