xref: /freebsd/sys/compat/linux/linux_misc.c (revision 0957b409)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2002 Doug Rabson
5  * Copyright (c) 1994-1995 Søren Schmidt
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer
13  *    in this position and unchanged.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. The name of the author may not be used to endorse or promote products
18  *    derived from this software without specific prior written permission
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include "opt_compat.h"
36 
37 #include <sys/param.h>
38 #include <sys/blist.h>
39 #include <sys/fcntl.h>
40 #if defined(__i386__)
41 #include <sys/imgact_aout.h>
42 #endif
43 #include <sys/jail.h>
44 #include <sys/kernel.h>
45 #include <sys/limits.h>
46 #include <sys/lock.h>
47 #include <sys/malloc.h>
48 #include <sys/mman.h>
49 #include <sys/mount.h>
50 #include <sys/mutex.h>
51 #include <sys/namei.h>
52 #include <sys/priv.h>
53 #include <sys/proc.h>
54 #include <sys/reboot.h>
55 #include <sys/racct.h>
56 #include <sys/random.h>
57 #include <sys/resourcevar.h>
58 #include <sys/sched.h>
59 #include <sys/sdt.h>
60 #include <sys/signalvar.h>
61 #include <sys/stat.h>
62 #include <sys/syscallsubr.h>
63 #include <sys/sysctl.h>
64 #include <sys/sysproto.h>
65 #include <sys/systm.h>
66 #include <sys/time.h>
67 #include <sys/vmmeter.h>
68 #include <sys/vnode.h>
69 #include <sys/wait.h>
70 #include <sys/cpuset.h>
71 #include <sys/uio.h>
72 
73 #include <security/mac/mac_framework.h>
74 
75 #include <vm/vm.h>
76 #include <vm/pmap.h>
77 #include <vm/vm_kern.h>
78 #include <vm/vm_map.h>
79 #include <vm/vm_extern.h>
80 #include <vm/vm_object.h>
81 #include <vm/swap_pager.h>
82 
83 #ifdef COMPAT_LINUX32
84 #include <machine/../linux32/linux.h>
85 #include <machine/../linux32/linux32_proto.h>
86 #else
87 #include <machine/../linux/linux.h>
88 #include <machine/../linux/linux_proto.h>
89 #endif
90 
91 #include <compat/linux/linux_dtrace.h>
92 #include <compat/linux/linux_file.h>
93 #include <compat/linux/linux_mib.h>
94 #include <compat/linux/linux_signal.h>
95 #include <compat/linux/linux_timer.h>
96 #include <compat/linux/linux_util.h>
97 #include <compat/linux/linux_sysproto.h>
98 #include <compat/linux/linux_emul.h>
99 #include <compat/linux/linux_misc.h>
100 
101 /**
102  * Special DTrace provider for the linuxulator.
103  *
104  * In this file we define the provider for the entire linuxulator. All
105  * modules (= files of the linuxulator) use it.
106  *
107  * We define a different name depending on the emulated bitsize, see
108  * ../../<ARCH>/linux{,32}/linux.h, e.g.:
109  *      native bitsize          = linuxulator
110  *      amd64, 32bit emulation  = linuxulator32
111  */
112 LIN_SDT_PROVIDER_DEFINE(LINUX_DTRACE);
113 
114 int stclohz;				/* Statistics clock frequency */
115 
/*
 * Translation table: the Linux RLIMIT_* number (used as the index)
 * maps to the corresponding FreeBSD RLIMIT_* constant.
 */
static unsigned int linux_to_bsd_resource[LINUX_RLIM_NLIMITS] = {
	RLIMIT_CPU, RLIMIT_FSIZE, RLIMIT_DATA, RLIMIT_STACK,
	RLIMIT_CORE, RLIMIT_RSS, RLIMIT_NPROC, RLIMIT_NOFILE,
	RLIMIT_MEMLOCK, RLIMIT_AS
};
121 
/* Result layout for the Linux sysinfo(2) system call. */
struct l_sysinfo {
	l_long		uptime;		/* Seconds since boot */
	l_ulong		loads[3];	/* 1, 5, and 15 minute load averages */
#define LINUX_SYSINFO_LOADS_SCALE 65536
	l_ulong		totalram;	/* Total usable main memory size */
	l_ulong		freeram;	/* Available memory size */
	l_ulong		sharedram;	/* Amount of shared memory */
	l_ulong		bufferram;	/* Memory used by buffers */
	l_ulong		totalswap;	/* Total swap space size */
	l_ulong		freeswap;	/* swap space still available */
	l_ushort	procs;		/* Number of current processes */
	l_ushort	pads;		/* Explicit alignment padding */
	l_ulong		totalbig;	/* "High" memory total (always 0 here) */
	l_ulong		freebig;	/* "High" memory free (always 0 here) */
	l_uint		mem_unit;	/* Size unit of the fields above */
	char		_f[20-2*sizeof(l_long)-sizeof(l_int)];	/* padding */
};
139 
/*
 * Sigset bundle passed as the sixth argument of Linux pselect6():
 * a user pointer to the signal mask (ss) and its size (ss_len).
 */
struct l_pselect6arg {
	l_uintptr_t	ss;
	l_size_t	ss_len;
};
144 
145 static int	linux_utimensat_nsec_valid(l_long);
146 
147 
/*
 * sysinfo(2): fill a struct l_sysinfo snapshot (uptime, load averages,
 * memory/swap usage, process count) and copy it out to args->info.
 */
int
linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args)
{
	struct l_sysinfo sysinfo;
	vm_object_t object;
	int i, j;
	struct timespec ts;

	bzero(&sysinfo, sizeof(sysinfo));
	getnanouptime(&ts);
	/* Round the uptime up to whole seconds. */
	if (ts.tv_nsec != 0)
		ts.tv_sec++;
	sysinfo.uptime = ts.tv_sec;

	/* Use the information from the mib to get our load averages */
	for (i = 0; i < 3; i++)
		sysinfo.loads[i] = averunnable.ldavg[i] *
		    LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale;

	sysinfo.totalram = physmem * PAGE_SIZE;
	/* Report unwired memory as free. */
	sysinfo.freeram = sysinfo.totalram - vm_wire_count() * PAGE_SIZE;

	/*
	 * Count resident pages of VM objects with more than one shadow
	 * as "shared"; the global object list is walked under its mutex.
	 */
	sysinfo.sharedram = 0;
	mtx_lock(&vm_object_list_mtx);
	TAILQ_FOREACH(object, &vm_object_list, object_list)
		if (object->shadow_count > 1)
			sysinfo.sharedram += object->resident_page_count;
	mtx_unlock(&vm_object_list_mtx);

	sysinfo.sharedram *= PAGE_SIZE;
	sysinfo.bufferram = 0;

	/* Swap totals in pages: i = total, j = used (see freeswap). */
	swap_pager_status(&i, &j);
	sysinfo.totalswap = i * PAGE_SIZE;
	sysinfo.freeswap = (i - j) * PAGE_SIZE;

	sysinfo.procs = nprocs;

	/* The following are only present in newer Linux kernels. */
	sysinfo.totalbig = 0;
	sysinfo.freebig = 0;
	sysinfo.mem_unit = 1;

	return (copyout(&sysinfo, args->info, sizeof(sysinfo)));
}
193 
194 #ifdef LINUX_LEGACY_SYSCALLS
195 int
196 linux_alarm(struct thread *td, struct linux_alarm_args *args)
197 {
198 	struct itimerval it, old_it;
199 	u_int secs;
200 	int error;
201 
202 #ifdef DEBUG
203 	if (ldebug(alarm))
204 		printf(ARGS(alarm, "%u"), args->secs);
205 #endif
206 	secs = args->secs;
207 	/*
208 	 * Linux alarm() is always successful. Limit secs to INT32_MAX / 2
209 	 * to match kern_setitimer()'s limit to avoid error from it.
210 	 *
211 	 * XXX. Linux limit secs to INT_MAX on 32 and does not limit on 64-bit
212 	 * platforms.
213 	 */
214 	if (secs > INT32_MAX / 2)
215 		secs = INT32_MAX / 2;
216 
217 	it.it_value.tv_sec = secs;
218 	it.it_value.tv_usec = 0;
219 	timevalclear(&it.it_interval);
220 	error = kern_setitimer(td, ITIMER_REAL, &it, &old_it);
221 	KASSERT(error == 0, ("kern_setitimer returns %d", error));
222 
223 	if ((old_it.it_value.tv_sec == 0 && old_it.it_value.tv_usec > 0) ||
224 	    old_it.it_value.tv_usec >= 500000)
225 		old_it.it_value.tv_sec++;
226 	td->td_retval[0] = old_it.it_value.tv_sec;
227 	return (0);
228 }
229 #endif
230 
231 int
232 linux_brk(struct thread *td, struct linux_brk_args *args)
233 {
234 	struct vmspace *vm = td->td_proc->p_vmspace;
235 	uintptr_t new, old;
236 
237 #ifdef DEBUG
238 	if (ldebug(brk))
239 		printf(ARGS(brk, "%p"), (void *)(uintptr_t)args->dsend);
240 #endif
241 	old = (uintptr_t)vm->vm_daddr + ctob(vm->vm_dsize);
242 	new = (uintptr_t)args->dsend;
243 	if ((caddr_t)new > vm->vm_daddr && !kern_break(td, &new))
244 		td->td_retval[0] = (register_t)new;
245 	else
246 		td->td_retval[0] = (register_t)old;
247 
248 	return (0);
249 }
250 
251 #if defined(__i386__)
252 /* XXX: what about amd64/linux32? */
253 
/*
 * uselib(2): map a Linux a.out shared library (ZMAGIC or QMAGIC) into
 * the calling process's address space.  i386 only.  Performs its own
 * permission/sanity checks on the vnode (duplicating much of
 * exec_check_permissions()), then maps text+data and allocates BSS.
 */
int
linux_uselib(struct thread *td, struct linux_uselib_args *args)
{
	struct nameidata ni;
	struct vnode *vp;
	struct exec *a_out;
	struct vattr attr;
	vm_offset_t vmaddr;
	unsigned long file_offset;
	unsigned long bss_size;
	char *library;
	ssize_t aresid;
	int error, locked, writecount;

	LCONVPATHEXIST(td, args->library, &library);

#ifdef DEBUG
	if (ldebug(uselib))
		printf(ARGS(uselib, "%s"), library);
#endif

	a_out = NULL;
	locked = 0;
	vp = NULL;

	NDINIT(&ni, LOOKUP, ISOPEN | FOLLOW | LOCKLEAF | AUDITVNODE1,
	    UIO_SYSSPACE, library, td);
	error = namei(&ni);
	LFREEPATH(library);
	if (error)
		goto cleanup;

	vp = ni.ni_vp;
	NDFREE(&ni, NDF_ONLY_PNBUF);

	/*
	 * From here on down, we have a locked vnode that must be unlocked.
	 * XXX: The code below largely duplicates exec_check_permissions().
	 */
	locked = 1;

	/* Writable? */
	error = VOP_GET_WRITECOUNT(vp, &writecount);
	if (error != 0)
		goto cleanup;
	if (writecount != 0) {
		error = ETXTBSY;
		goto cleanup;
	}

	/* Executable? */
	error = VOP_GETATTR(vp, &attr, td->td_ucred);
	if (error)
		goto cleanup;

	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) {
		/* EACCESS is what exec(2) returns. */
		error = ENOEXEC;
		goto cleanup;
	}

	/* Sensible size? */
	if (attr.va_size == 0) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* Can we access it? */
	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
	if (error)
		goto cleanup;

	/*
	 * XXX: This should use vn_open() so that it is properly authorized,
	 * and to reduce code redundancy all over the place here.
	 * XXX: Not really, it duplicates far more of exec_check_permissions()
	 * than vn_open().
	 */
#ifdef MAC
	error = mac_vnode_check_open(td->td_ucred, vp, VREAD);
	if (error)
		goto cleanup;
#endif
	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
	if (error)
		goto cleanup;

	/* Pull in executable header into exec_map */
	error = vm_mmap(exec_map, (vm_offset_t *)&a_out, PAGE_SIZE,
	    VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE, vp, 0);
	if (error)
		goto cleanup;

	/* Is it a Linux binary ? */
	if (((a_out->a_magic >> 16) & 0xff) != 0x64) {
		error = ENOEXEC;
		goto cleanup;
	}

	/*
	 * While we are here, we should REALLY do some more checks
	 */

	/* Set file/virtual offset based on a.out variant. */
	switch ((int)(a_out->a_magic & 0xffff)) {
	case 0413:			/* ZMAGIC */
		file_offset = 1024;
		break;
	case 0314:			/* QMAGIC */
		file_offset = 0;
		break;
	default:
		error = ENOEXEC;
		goto cleanup;
	}

	bss_size = round_page(a_out->a_bss);

	/* Check various fields in header for validity/bounds. */
	if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* text + data can't exceed file size */
	if (a_out->a_data + a_out->a_text > attr.va_size) {
		error = EFAULT;
		goto cleanup;
	}

	/*
	 * text/data/bss must not exceed limits
	 * XXX - this is not complete. it should check current usage PLUS
	 * the resources needed by this library.
	 */
	PROC_LOCK(td->td_proc);
	if (a_out->a_text > maxtsiz ||
	    a_out->a_data + bss_size > lim_cur_proc(td->td_proc, RLIMIT_DATA) ||
	    racct_set(td->td_proc, RACCT_DATA, a_out->a_data +
	    bss_size) != 0) {
		PROC_UNLOCK(td->td_proc);
		error = ENOMEM;
		goto cleanup;
	}
	PROC_UNLOCK(td->td_proc);

	/*
	 * Prevent more writers.
	 * XXX: Note that if any of the VM operations fail below we don't
	 * clear this flag.
	 */
	VOP_SET_TEXT(vp);

	/*
	 * Lock no longer needed
	 */
	locked = 0;
	VOP_UNLOCK(vp, 0);

	/*
	 * Check if file_offset page aligned. Currently we cannot handle
	 * misaligned file offsets, and so we read in the entire image
	 * (what a waste).
	 */
	if (file_offset & PAGE_MASK) {
#ifdef DEBUG
		printf("uselib: Non page aligned binary %lu\n", file_offset);
#endif
		/* Map text+data read/write/execute */

		/* a_entry is the load address and is page aligned */
		vmaddr = trunc_page(a_out->a_entry);

		/* get anon user mapping, read+write+execute */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, a_out->a_text + a_out->a_data, 0, VMFS_NO_SPACE,
		    VM_PROT_ALL, VM_PROT_ALL, 0);
		if (error)
			goto cleanup;

		/* Copy text+data from the file into the anonymous mapping. */
		error = vn_rdwr(UIO_READ, vp, (void *)vmaddr, file_offset,
		    a_out->a_text + a_out->a_data, UIO_USERSPACE, 0,
		    td->td_ucred, NOCRED, &aresid, td);
		if (error != 0)
			goto cleanup;
		/* A short read means a truncated image. */
		if (aresid != 0) {
			error = ENOEXEC;
			goto cleanup;
		}
	} else {
#ifdef DEBUG
		printf("uselib: Page aligned binary %lu\n", file_offset);
#endif
		/*
		 * for QMAGIC, a_entry is 20 bytes beyond the load address
		 * to skip the executable header
		 */
		vmaddr = trunc_page(a_out->a_entry);

		/*
		 * Map it all into the process's space as a single
		 * copy-on-write "data" segment.
		 */
		error = vm_mmap(&td->td_proc->p_vmspace->vm_map, &vmaddr,
		    a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL,
		    MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, file_offset);
		if (error)
			goto cleanup;
	}
#ifdef DEBUG
	printf("mem=%08lx = %08lx %08lx\n", (long)vmaddr, ((long *)vmaddr)[0],
	    ((long *)vmaddr)[1]);
#endif
	if (bss_size != 0) {
		/* Calculate BSS start address */
		vmaddr = trunc_page(a_out->a_entry) + a_out->a_text +
		    a_out->a_data;

		/* allocate some 'anon' space */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, bss_size, 0, VMFS_NO_SPACE, VM_PROT_ALL,
		    VM_PROT_ALL, 0);
		if (error)
			goto cleanup;
	}

cleanup:
	/* Unlock vnode if needed */
	if (locked)
		VOP_UNLOCK(vp, 0);

	/* Release the temporary mapping. */
	if (a_out)
		kmap_free_wakeup(exec_map, (vm_offset_t)a_out, PAGE_SIZE);

	return (error);
}
492 
493 #endif	/* __i386__ */
494 
495 #ifdef LINUX_LEGACY_SYSCALLS
/*
 * select(2): wrap kern_select() with the Linux timeout convention —
 * Linux writes the unused remainder of the timeout back to userland,
 * so measure elapsed time around the call and copy the residual out.
 */
int
linux_select(struct thread *td, struct linux_select_args *args)
{
	l_timeval ltv;
	struct timeval tv0, tv1, utv, *tvp;
	int error;

#ifdef DEBUG
	if (ldebug(select))
		printf(ARGS(select, "%d, %p, %p, %p, %p"), args->nfds,
		    (void *)args->readfds, (void *)args->writefds,
		    (void *)args->exceptfds, (void *)args->timeout);
#endif

	/*
	 * Store current time for computation of the amount of
	 * time left.
	 */
	if (args->timeout) {
		if ((error = copyin(args->timeout, &ltv, sizeof(ltv))))
			goto select_out;
		utv.tv_sec = ltv.tv_sec;
		utv.tv_usec = ltv.tv_usec;
#ifdef DEBUG
		if (ldebug(select))
			printf(LMSG("incoming timeout (%jd/%ld)"),
			    (intmax_t)utv.tv_sec, utv.tv_usec);
#endif

		if (itimerfix(&utv)) {
			/*
			 * The timeval was invalid.  Convert it to something
			 * valid that will act as it does under Linux.
			 */
			utv.tv_sec += utv.tv_usec / 1000000;
			utv.tv_usec %= 1000000;
			if (utv.tv_usec < 0) {
				utv.tv_sec -= 1;
				utv.tv_usec += 1000000;
			}
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		}
		microtime(&tv0);
		tvp = &utv;
	} else
		tvp = NULL;

	error = kern_select(td, args->nfds, args->readfds, args->writefds,
	    args->exceptfds, tvp, LINUX_NFDBITS);

#ifdef DEBUG
	if (ldebug(select))
		printf(LMSG("real select returns %d"), error);
#endif
	if (error)
		goto select_out;

	if (args->timeout) {
		if (td->td_retval[0]) {
			/*
			 * Compute how much time was left of the timeout,
			 * by subtracting the current time and the time
			 * before we started the call, and subtracting
			 * that result from the user-supplied value.
			 */
			microtime(&tv1);
			timevalsub(&tv1, &tv0);
			timevalsub(&utv, &tv1);
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		} else
			/* Timed out: the whole timeout was consumed. */
			timevalclear(&utv);
#ifdef DEBUG
		if (ldebug(select))
			printf(LMSG("outgoing timeout (%jd/%ld)"),
			    (intmax_t)utv.tv_sec, utv.tv_usec);
#endif
		/* Copy the residual timeout back to userland. */
		ltv.tv_sec = utv.tv_sec;
		ltv.tv_usec = utv.tv_usec;
		if ((error = copyout(&ltv, args->timeout, sizeof(ltv))))
			goto select_out;
	}

select_out:
#ifdef DEBUG
	if (ldebug(select))
		printf(LMSG("select_out -> %d"), error);
#endif
	return (error);
}
587 #endif
588 
589 int
590 linux_mremap(struct thread *td, struct linux_mremap_args *args)
591 {
592 	uintptr_t addr;
593 	size_t len;
594 	int error = 0;
595 
596 #ifdef DEBUG
597 	if (ldebug(mremap))
598 		printf(ARGS(mremap, "%p, %08lx, %08lx, %08lx"),
599 		    (void *)(uintptr_t)args->addr,
600 		    (unsigned long)args->old_len,
601 		    (unsigned long)args->new_len,
602 		    (unsigned long)args->flags);
603 #endif
604 
605 	if (args->flags & ~(LINUX_MREMAP_FIXED | LINUX_MREMAP_MAYMOVE)) {
606 		td->td_retval[0] = 0;
607 		return (EINVAL);
608 	}
609 
610 	/*
611 	 * Check for the page alignment.
612 	 * Linux defines PAGE_MASK to be FreeBSD ~PAGE_MASK.
613 	 */
614 	if (args->addr & PAGE_MASK) {
615 		td->td_retval[0] = 0;
616 		return (EINVAL);
617 	}
618 
619 	args->new_len = round_page(args->new_len);
620 	args->old_len = round_page(args->old_len);
621 
622 	if (args->new_len > args->old_len) {
623 		td->td_retval[0] = 0;
624 		return (ENOMEM);
625 	}
626 
627 	if (args->new_len < args->old_len) {
628 		addr = args->addr + args->new_len;
629 		len = args->old_len - args->new_len;
630 		error = kern_munmap(td, addr, len);
631 	}
632 
633 	td->td_retval[0] = error ? 0 : (uintptr_t)args->addr;
634 	return (error);
635 }
636 
637 #define LINUX_MS_ASYNC       0x0001
638 #define LINUX_MS_INVALIDATE  0x0002
639 #define LINUX_MS_SYNC        0x0004
640 
641 int
642 linux_msync(struct thread *td, struct linux_msync_args *args)
643 {
644 
645 	return (kern_msync(td, args->addr, args->len,
646 	    args->fl & ~LINUX_MS_SYNC));
647 }
648 
649 #ifdef LINUX_LEGACY_SYSCALLS
650 int
651 linux_time(struct thread *td, struct linux_time_args *args)
652 {
653 	struct timeval tv;
654 	l_time_t tm;
655 	int error;
656 
657 #ifdef DEBUG
658 	if (ldebug(time))
659 		printf(ARGS(time, "*"));
660 #endif
661 
662 	microtime(&tv);
663 	tm = tv.tv_sec;
664 	if (args->tm && (error = copyout(&tm, args->tm, sizeof(tm))))
665 		return (error);
666 	td->td_retval[0] = tm;
667 	return (0);
668 }
669 #endif
670 
/* Result layout for the Linux times(2) system call (struct tms). */
struct l_times_argv {
	l_clock_t	tms_utime;	/* User CPU time */
	l_clock_t	tms_stime;	/* System CPU time */
	l_clock_t	tms_cutime;	/* User CPU time of reaped children */
	l_clock_t	tms_cstime;	/* System CPU time of reaped children */
};


/*
 * Glibc versions prior to 2.2.1 always use hard-coded CLK_TCK value.
 * Since 2.2.1 Glibc uses value exported from kernel via AT_CLKTCK
 * auxiliary vector entry.
 */
#define	CLK_TCK		100

/* Convert a struct timeval to ticks using the legacy fixed CLK_TCK. */
#define	CONVOTCK(r)	(r.tv_sec * CLK_TCK + r.tv_usec / (1000000 / CLK_TCK))
/* Convert a struct timeval to ticks using the stclohz clock frequency. */
#define	CONVNTCK(r)	(r.tv_sec * stclohz + r.tv_usec / (1000000 / stclohz))

/* Pick the conversion matching the emulated Linux kernel version. */
#define	CONVTCK(r)	(linux_kernver(td) >= LINUX_KERNVER_2004000 ?		\
			    CONVNTCK(r) : CONVOTCK(r))
691 
/*
 * times(2): report accumulated CPU times (in ticks) for the process
 * and its reaped children, and return the uptime in ticks.
 */
int
linux_times(struct thread *td, struct linux_times_args *args)
{
	struct timeval tv, utime, stime, cutime, cstime;
	struct l_times_argv tms;
	struct proc *p;
	int error;

#ifdef DEBUG
	if (ldebug(times))
		printf(ARGS(times, "*"));
#endif

	/* A NULL buffer means only the uptime return value is wanted. */
	if (args->buf != NULL) {
		p = td->td_proc;
		PROC_LOCK(p);
		PROC_STATLOCK(p);
		calcru(p, &utime, &stime);
		PROC_STATUNLOCK(p);
		calccru(p, &cutime, &cstime);
		PROC_UNLOCK(p);

		tms.tms_utime = CONVTCK(utime);
		tms.tms_stime = CONVTCK(stime);

		tms.tms_cutime = CONVTCK(cutime);
		tms.tms_cstime = CONVTCK(cstime);

		if ((error = copyout(&tms, args->buf, sizeof(tms))))
			return (error);
	}

	/* The return value is the uptime expressed in ticks. */
	microuptime(&tv);
	td->td_retval[0] = (int)CONVTCK(tv);
	return (0);
}
728 
729 int
730 linux_newuname(struct thread *td, struct linux_newuname_args *args)
731 {
732 	struct l_new_utsname utsname;
733 	char osname[LINUX_MAX_UTSNAME];
734 	char osrelease[LINUX_MAX_UTSNAME];
735 	char *p;
736 
737 #ifdef DEBUG
738 	if (ldebug(newuname))
739 		printf(ARGS(newuname, "*"));
740 #endif
741 
742 	linux_get_osname(td, osname);
743 	linux_get_osrelease(td, osrelease);
744 
745 	bzero(&utsname, sizeof(utsname));
746 	strlcpy(utsname.sysname, osname, LINUX_MAX_UTSNAME);
747 	getcredhostname(td->td_ucred, utsname.nodename, LINUX_MAX_UTSNAME);
748 	getcreddomainname(td->td_ucred, utsname.domainname, LINUX_MAX_UTSNAME);
749 	strlcpy(utsname.release, osrelease, LINUX_MAX_UTSNAME);
750 	strlcpy(utsname.version, version, LINUX_MAX_UTSNAME);
751 	for (p = utsname.version; *p != '\0'; ++p)
752 		if (*p == '\n') {
753 			*p = '\0';
754 			break;
755 		}
756 	strlcpy(utsname.machine, linux_kplatform, LINUX_MAX_UTSNAME);
757 
758 	return (copyout(&utsname, args->buf, sizeof(utsname)));
759 }
760 
/* Linux utime(2) argument buffer: timestamps in whole seconds. */
struct l_utimbuf {
	l_time_t l_actime;	/* Access time */
	l_time_t l_modtime;	/* Modification time */
};
765 
766 #ifdef LINUX_LEGACY_SYSCALLS
767 int
768 linux_utime(struct thread *td, struct linux_utime_args *args)
769 {
770 	struct timeval tv[2], *tvp;
771 	struct l_utimbuf lut;
772 	char *fname;
773 	int error;
774 
775 	LCONVPATHEXIST(td, args->fname, &fname);
776 
777 #ifdef DEBUG
778 	if (ldebug(utime))
779 		printf(ARGS(utime, "%s, *"), fname);
780 #endif
781 
782 	if (args->times) {
783 		if ((error = copyin(args->times, &lut, sizeof lut))) {
784 			LFREEPATH(fname);
785 			return (error);
786 		}
787 		tv[0].tv_sec = lut.l_actime;
788 		tv[0].tv_usec = 0;
789 		tv[1].tv_sec = lut.l_modtime;
790 		tv[1].tv_usec = 0;
791 		tvp = tv;
792 	} else
793 		tvp = NULL;
794 
795 	error = kern_utimesat(td, AT_FDCWD, fname, UIO_SYSSPACE, tvp,
796 	    UIO_SYSSPACE);
797 	LFREEPATH(fname);
798 	return (error);
799 }
800 #endif
801 
802 #ifdef LINUX_LEGACY_SYSCALLS
803 int
804 linux_utimes(struct thread *td, struct linux_utimes_args *args)
805 {
806 	l_timeval ltv[2];
807 	struct timeval tv[2], *tvp = NULL;
808 	char *fname;
809 	int error;
810 
811 	LCONVPATHEXIST(td, args->fname, &fname);
812 
813 #ifdef DEBUG
814 	if (ldebug(utimes))
815 		printf(ARGS(utimes, "%s, *"), fname);
816 #endif
817 
818 	if (args->tptr != NULL) {
819 		if ((error = copyin(args->tptr, ltv, sizeof ltv))) {
820 			LFREEPATH(fname);
821 			return (error);
822 		}
823 		tv[0].tv_sec = ltv[0].tv_sec;
824 		tv[0].tv_usec = ltv[0].tv_usec;
825 		tv[1].tv_sec = ltv[1].tv_sec;
826 		tv[1].tv_usec = ltv[1].tv_usec;
827 		tvp = tv;
828 	}
829 
830 	error = kern_utimesat(td, AT_FDCWD, fname, UIO_SYSSPACE,
831 	    tvp, UIO_SYSSPACE);
832 	LFREEPATH(fname);
833 	return (error);
834 }
835 #endif
836 
837 static int
838 linux_utimensat_nsec_valid(l_long nsec)
839 {
840 
841 	if (nsec == LINUX_UTIME_OMIT || nsec == LINUX_UTIME_NOW)
842 		return (0);
843 	if (nsec >= 0 && nsec <= 999999999)
844 		return (0);
845 	return (1);
846 }
847 
/*
 * utimensat(2): set timestamps with nanosecond precision, translating
 * the Linux UTIME_NOW/UTIME_OMIT tokens and the AT_SYMLINK_NOFOLLOW
 * flag.  With a NULL pathname the fd itself is the target
 * (futimens-style).
 */
int
linux_utimensat(struct thread *td, struct linux_utimensat_args *args)
{
	struct l_timespec l_times[2];
	struct timespec times[2], *timesp = NULL;
	char *path = NULL;
	int error, dfd, flags = 0;

	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;

#ifdef DEBUG
	if (ldebug(utimensat))
		printf(ARGS(utimensat, "%d, *"), dfd);
#endif

	if (args->flags & ~LINUX_AT_SYMLINK_NOFOLLOW)
		return (EINVAL);

	if (args->times != NULL) {
		error = copyin(args->times, l_times, sizeof(l_times));
		if (error != 0)
			return (error);

		if (linux_utimensat_nsec_valid(l_times[0].tv_nsec) != 0 ||
		    linux_utimensat_nsec_valid(l_times[1].tv_nsec) != 0)
			return (EINVAL);

		/* Translate the Linux special tv_nsec tokens. */
		times[0].tv_sec = l_times[0].tv_sec;
		switch (l_times[0].tv_nsec)
		{
		case LINUX_UTIME_OMIT:
			times[0].tv_nsec = UTIME_OMIT;
			break;
		case LINUX_UTIME_NOW:
			times[0].tv_nsec = UTIME_NOW;
			break;
		default:
			times[0].tv_nsec = l_times[0].tv_nsec;
		}

		times[1].tv_sec = l_times[1].tv_sec;
		switch (l_times[1].tv_nsec)
		{
		case LINUX_UTIME_OMIT:
			times[1].tv_nsec = UTIME_OMIT;
			break;
		case LINUX_UTIME_NOW:
			times[1].tv_nsec = UTIME_NOW;
			break;
		default:
			times[1].tv_nsec = l_times[1].tv_nsec;
			break;
		}
		timesp = times;

		/* This breaks POSIX, but is what the Linux kernel does
		 * _on purpose_ (documented in the man page for utimensat(2)),
		 * so we must follow that behaviour. */
		if (times[0].tv_nsec == UTIME_OMIT &&
		    times[1].tv_nsec == UTIME_OMIT)
			return (0);
	}

	if (args->pathname != NULL)
		LCONVPATHEXIST_AT(td, args->pathname, &path, dfd);
	else if (args->flags != 0)
		/* A flag with no pathname cannot be honored. */
		return (EINVAL);

	if (args->flags & LINUX_AT_SYMLINK_NOFOLLOW)
		flags |= AT_SYMLINK_NOFOLLOW;

	if (path == NULL)
		error = kern_futimens(td, dfd, timesp, UIO_SYSSPACE);
	else {
		error = kern_utimensat(td, dfd, path, UIO_SYSSPACE, timesp,
			UIO_SYSSPACE, flags);
		LFREEPATH(path);
	}

	return (error);
}
929 
930 #ifdef LINUX_LEGACY_SYSCALLS
931 int
932 linux_futimesat(struct thread *td, struct linux_futimesat_args *args)
933 {
934 	l_timeval ltv[2];
935 	struct timeval tv[2], *tvp = NULL;
936 	char *fname;
937 	int error, dfd;
938 
939 	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;
940 	LCONVPATHEXIST_AT(td, args->filename, &fname, dfd);
941 
942 #ifdef DEBUG
943 	if (ldebug(futimesat))
944 		printf(ARGS(futimesat, "%s, *"), fname);
945 #endif
946 
947 	if (args->utimes != NULL) {
948 		if ((error = copyin(args->utimes, ltv, sizeof ltv))) {
949 			LFREEPATH(fname);
950 			return (error);
951 		}
952 		tv[0].tv_sec = ltv[0].tv_sec;
953 		tv[0].tv_usec = ltv[0].tv_usec;
954 		tv[1].tv_sec = ltv[1].tv_sec;
955 		tv[1].tv_usec = ltv[1].tv_usec;
956 		tvp = tv;
957 	}
958 
959 	error = kern_utimesat(td, dfd, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE);
960 	LFREEPATH(fname);
961 	return (error);
962 }
963 #endif
964 
965 int
966 linux_common_wait(struct thread *td, int pid, int *status,
967     int options, struct rusage *ru)
968 {
969 	int error, tmpstat;
970 
971 	error = kern_wait(td, pid, &tmpstat, options, ru);
972 	if (error)
973 		return (error);
974 
975 	if (status) {
976 		tmpstat &= 0xffff;
977 		if (WIFSIGNALED(tmpstat))
978 			tmpstat = (tmpstat & 0xffffff80) |
979 			    bsd_to_linux_signal(WTERMSIG(tmpstat));
980 		else if (WIFSTOPPED(tmpstat))
981 			tmpstat = (tmpstat & 0xffff00ff) |
982 			    (bsd_to_linux_signal(WSTOPSIG(tmpstat)) << 8);
983 		else if (WIFCONTINUED(tmpstat))
984 			tmpstat = 0xffff;
985 		error = copyout(&tmpstat, status, sizeof(int));
986 	}
987 
988 	return (error);
989 }
990 
991 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
992 int
993 linux_waitpid(struct thread *td, struct linux_waitpid_args *args)
994 {
995 	struct linux_wait4_args wait4_args;
996 
997 #ifdef DEBUG
998 	if (ldebug(waitpid))
999 		printf(ARGS(waitpid, "%d, %p, %d"),
1000 		    args->pid, (void *)args->status, args->options);
1001 #endif
1002 
1003 	wait4_args.pid = args->pid;
1004 	wait4_args.status = args->status;
1005 	wait4_args.options = args->options;
1006 	wait4_args.rusage = NULL;
1007 
1008 	return (linux_wait4(td, &wait4_args));
1009 }
1010 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */
1011 
1012 int
1013 linux_wait4(struct thread *td, struct linux_wait4_args *args)
1014 {
1015 	int error, options;
1016 	struct rusage ru, *rup;
1017 
1018 #ifdef DEBUG
1019 	if (ldebug(wait4))
1020 		printf(ARGS(wait4, "%d, %p, %d, %p"),
1021 		    args->pid, (void *)args->status, args->options,
1022 		    (void *)args->rusage);
1023 #endif
1024 	if (args->options & ~(LINUX_WUNTRACED | LINUX_WNOHANG |
1025 	    LINUX_WCONTINUED | __WCLONE | __WNOTHREAD | __WALL))
1026 		return (EINVAL);
1027 
1028 	options = WEXITED;
1029 	linux_to_bsd_waitopts(args->options, &options);
1030 
1031 	if (args->rusage != NULL)
1032 		rup = &ru;
1033 	else
1034 		rup = NULL;
1035 	error = linux_common_wait(td, args->pid, args->status, options, rup);
1036 	if (error != 0)
1037 		return (error);
1038 	if (args->rusage != NULL)
1039 		error = linux_copyout_rusage(&ru, args->rusage);
1040 	return (error);
1041 }
1042 
/*
 * waitid(2): translate the Linux idtype/options, wait via kern_wait6()
 * and optionally copy out the child's rusage and a Linux siginfo
 * describing the state change.
 */
int
linux_waitid(struct thread *td, struct linux_waitid_args *args)
{
	int status, options, sig;
	struct __wrusage wru;
	siginfo_t siginfo;
	l_siginfo_t lsi;
	idtype_t idtype;
	struct proc *p;
	int error;

	options = 0;
	linux_to_bsd_waitopts(args->options, &options);

	/* At least one of the state-selection options must be given. */
	if (options & ~(WNOHANG | WNOWAIT | WEXITED | WUNTRACED | WCONTINUED))
		return (EINVAL);
	if (!(options & (WEXITED | WUNTRACED | WCONTINUED)))
		return (EINVAL);

	/* Translate the Linux id namespace selector. */
	switch (args->idtype) {
	case LINUX_P_ALL:
		idtype = P_ALL;
		break;
	case LINUX_P_PID:
		if (args->id <= 0)
			return (EINVAL);
		idtype = P_PID;
		break;
	case LINUX_P_PGID:
		if (args->id <= 0)
			return (EINVAL);
		idtype = P_PGID;
		break;
	default:
		return (EINVAL);
	}

	error = kern_wait6(td, idtype, args->id, &status, options,
	    &wru, &siginfo);
	if (error != 0)
		return (error);
	if (args->rusage != NULL) {
		error = linux_copyout_rusage(&wru.wru_children,
		    args->rusage);
		if (error != 0)
			return (error);
	}
	if (args->info != NULL) {
		p = td->td_proc;
		bzero(&lsi, sizeof(lsi));
		/* td_retval[0] nonzero means a child was reported. */
		if (td->td_retval[0] != 0) {
			sig = bsd_to_linux_signal(siginfo.si_signo);
			siginfo_to_lsiginfo(&siginfo, &lsi, sig);
		}
		error = copyout(&lsi, args->info, sizeof(lsi));
	}
	/* Linux waitid() returns 0, not the pid. */
	td->td_retval[0] = 0;

	return (error);
}
1103 
1104 #ifdef LINUX_LEGACY_SYSCALLS
/*
 * mknod(2): create a filesystem node, dispatching on the S_IFMT type
 * bits to the matching native primitive.
 */
int
linux_mknod(struct thread *td, struct linux_mknod_args *args)
{
	char *path;
	int error;

	LCONVPATHCREAT(td, args->path, &path);

#ifdef DEBUG
	if (ldebug(mknod))
		printf(ARGS(mknod, "%s, %d, %ju"), path, args->mode,
		    (uintmax_t)args->dev);
#endif

	switch (args->mode & S_IFMT) {
	case S_IFIFO:
	case S_IFSOCK:
		/* NOTE(review): S_IFSOCK is created as a fifo here. */
		error = kern_mkfifoat(td, AT_FDCWD, path, UIO_SYSSPACE,
		    args->mode);
		break;

	case S_IFCHR:
	case S_IFBLK:
		error = kern_mknodat(td, AT_FDCWD, path, UIO_SYSSPACE,
		    args->mode, args->dev);
		break;

	case S_IFDIR:
		/* Directories are created with mkdir(2), not mknod(2). */
		error = EPERM;
		break;

	case 0:
		/* No type bits: treat as a regular file. */
		args->mode |= S_IFREG;
		/* FALLTHROUGH */
	case S_IFREG:
		/* Create (and immediately close) an empty regular file. */
		error = kern_openat(td, AT_FDCWD, path, UIO_SYSSPACE,
		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
		if (error == 0)
			kern_close(td, td->td_retval[0]);
		break;

	default:
		error = EINVAL;
		break;
	}
	LFREEPATH(path);
	return (error);
}
1153 #endif
1154 
int
linux_mknodat(struct thread *td, struct linux_mknodat_args *args)
{
	char *path;
	int error, dfd;

	/* Map the Linux AT_FDCWD sentinel to the native one. */
	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;
	LCONVPATHCREAT_AT(td, args->filename, &path, dfd);

#ifdef DEBUG
	if (ldebug(mknodat))
		printf(ARGS(mknodat, "%s, %d, %d"), path, args->mode, args->dev);
#endif

	/* Same type dispatch as linux_mknod(), relative to dfd. */
	switch (args->mode & S_IFMT) {
	case S_IFIFO:
	case S_IFSOCK:
		error = kern_mkfifoat(td, dfd, path, UIO_SYSSPACE, args->mode);
		break;

	case S_IFCHR:
	case S_IFBLK:
		error = kern_mknodat(td, dfd, path, UIO_SYSSPACE, args->mode,
		    args->dev);
		break;

	case S_IFDIR:
		error = EPERM;
		break;

	case 0:
		/* No type bits: Linux treats this as a regular file. */
		args->mode |= S_IFREG;
		/* FALLTHROUGH */
	case S_IFREG:
		/* Create the regular file, then immediately close it. */
		error = kern_openat(td, dfd, path, UIO_SYSSPACE,
		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
		if (error == 0)
			kern_close(td, td->td_retval[0]);
		break;

	default:
		error = EINVAL;
		break;
	}
	LFREEPATH(path);
	return (error);
}
1202 
1203 /*
1204  * UGH! This is just about the dumbest idea I've ever heard!!
1205  */
int
linux_personality(struct thread *td, struct linux_personality_args *args)
{
	struct linux_pemuldata *pem;
	struct proc *p = td->td_proc;
	uint32_t old;

#ifdef DEBUG
	if (ldebug(personality))
		printf(ARGS(personality, "%u"), args->per);
#endif

	PROC_LOCK(p);
	pem = pem_find(p);
	old = pem->persona;
	/* 0xffffffff is Linux's "query only" value; don't modify then. */
	if (args->per != 0xffffffff)
		pem->persona = args->per;
	PROC_UNLOCK(p);

	/* Linux returns the previous persona on success. */
	td->td_retval[0] = old;
	return (0);
}
1228 
/* Linux-layout itimerval; members are l_timeval, not native timeval. */
struct l_itimerval {
	l_timeval it_interval;	/* timer reload period */
	l_timeval it_value;	/* time until next expiration */
};
1233 
/*
 * Copy an itimerval between the native and Linux layouts.  Both
 * structures use the same member names, so the one macro converts in
 * either direction; field-by-field assignment is required because the
 * field widths may differ (e.g. under COMPAT_LINUX32).
 *
 * Wrapped in do { } while (0) so the expansion is a single statement
 * and remains correct inside an unbraced if/else body.
 */
#define	B2L_ITIMERVAL(bip, lip)						\
	do {								\
		(bip)->it_interval.tv_sec = (lip)->it_interval.tv_sec;	\
		(bip)->it_interval.tv_usec = (lip)->it_interval.tv_usec;\
		(bip)->it_value.tv_sec = (lip)->it_value.tv_sec;	\
		(bip)->it_value.tv_usec = (lip)->it_value.tv_usec;	\
	} while (0)
1239 
int
linux_setitimer(struct thread *td, struct linux_setitimer_args *uap)
{
	int error;
	struct l_itimerval ls;
	struct itimerval aitv, oitv;

#ifdef DEBUG
	if (ldebug(setitimer))
		printf(ARGS(setitimer, "%p, %p"),
		    (void *)uap->itv, (void *)uap->oitv);
#endif

	/*
	 * A NULL new value means "query only": reuse the getitimer path
	 * with oitv as the output pointer.
	 */
	if (uap->itv == NULL) {
		uap->itv = uap->oitv;
		return (linux_getitimer(td, (struct linux_getitimer_args *)uap));
	}

	error = copyin(uap->itv, &ls, sizeof(ls));
	if (error != 0)
		return (error);
	/* Convert from the Linux layout to the native itimerval. */
	B2L_ITIMERVAL(&aitv, &ls);
#ifdef DEBUG
	if (ldebug(setitimer)) {
		printf("setitimer: value: sec: %jd, usec: %ld\n",
		    (intmax_t)aitv.it_value.tv_sec, aitv.it_value.tv_usec);
		printf("setitimer: interval: sec: %jd, usec: %ld\n",
		    (intmax_t)aitv.it_interval.tv_sec, aitv.it_interval.tv_usec);
	}
#endif
	error = kern_setitimer(td, uap->which, &aitv, &oitv);
	if (error != 0 || uap->oitv == NULL)
		return (error);
	/* Return the previous timer value in Linux layout. */
	B2L_ITIMERVAL(&ls, &oitv);

	return (copyout(&ls, uap->oitv, sizeof(ls)));
}
1277 
int
linux_getitimer(struct thread *td, struct linux_getitimer_args *uap)
{
	int error;
	struct l_itimerval ls;
	struct itimerval aitv;

#ifdef DEBUG
	if (ldebug(getitimer))
		printf(ARGS(getitimer, "%p"), (void *)uap->itv);
#endif
	error = kern_getitimer(td, uap->which, &aitv);
	if (error != 0)
		return (error);
	/* Convert the native itimerval to the Linux layout for copyout. */
	B2L_ITIMERVAL(&ls, &aitv);
	return (copyout(&ls, uap->itv, sizeof(ls)));
}
1295 
1296 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
1297 int
1298 linux_nice(struct thread *td, struct linux_nice_args *args)
1299 {
1300 	struct setpriority_args bsd_args;
1301 
1302 	bsd_args.which = PRIO_PROCESS;
1303 	bsd_args.who = 0;		/* current process */
1304 	bsd_args.prio = args->inc;
1305 	return (sys_setpriority(td, &bsd_args));
1306 }
1307 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */
1308 
int
linux_setgroups(struct thread *td, struct linux_setgroups_args *args)
{
	struct ucred *newcred, *oldcred;
	l_gid_t *linux_gidset;
	gid_t *bsd_gidset;
	int ngrp, error;
	struct proc *p;

	/* Reject out-of-range set sizes before allocating anything. */
	ngrp = args->gidsetsize;
	if (ngrp < 0 || ngrp >= ngroups_max + 1)
		return (EINVAL);
	linux_gidset = malloc(ngrp * sizeof(*linux_gidset), M_LINUX, M_WAITOK);
	error = copyin(args->grouplist, linux_gidset, ngrp * sizeof(l_gid_t));
	if (error)
		goto out;
	newcred = crget();
	crextend(newcred, ngrp + 1);
	p = td->td_proc;
	PROC_LOCK(p);
	oldcred = p->p_ucred;
	crcopy(newcred, oldcred);

	/*
	 * cr_groups[0] holds egid. Setting the whole set from
	 * the supplied set will cause egid to be changed too.
	 * Keep cr_groups[0] unchanged to prevent that.
	 */

	if ((error = priv_check_cred(oldcred, PRIV_CRED_SETGROUPS)) != 0) {
		PROC_UNLOCK(p);
		crfree(newcred);
		goto out;
	}

	if (ngrp > 0) {
		/* Shift the Linux set up by one to preserve cr_groups[0]. */
		newcred->cr_ngroups = ngrp + 1;

		bsd_gidset = newcred->cr_groups;
		ngrp--;
		while (ngrp >= 0) {
			bsd_gidset[ngrp + 1] = linux_gidset[ngrp];
			ngrp--;
		}
	} else
		newcred->cr_ngroups = 1;

	/* Credentials changed: mark the process and install the new cred. */
	setsugid(p);
	proc_set_cred(p, newcred);
	PROC_UNLOCK(p);
	crfree(oldcred);
	error = 0;
out:
	free(linux_gidset, M_LINUX);
	return (error);
}
1365 
int
linux_getgroups(struct thread *td, struct linux_getgroups_args *args)
{
	struct ucred *cred;
	l_gid_t *linux_gidset;
	gid_t *bsd_gidset;
	int bsd_gidsetsz, ngrp, error;

	cred = td->td_ucred;
	bsd_gidset = cred->cr_groups;
	bsd_gidsetsz = cred->cr_ngroups - 1;

	/*
	 * cr_groups[0] holds egid. Returning the whole set
	 * here will cause a duplicate. Exclude cr_groups[0]
	 * to prevent that.
	 */

	/* A zero-sized request just asks for the group count. */
	if ((ngrp = args->gidsetsize) == 0) {
		td->td_retval[0] = bsd_gidsetsz;
		return (0);
	}

	if (ngrp < bsd_gidsetsz)
		return (EINVAL);

	/* Copy groups shifted down by one, skipping the egid slot. */
	ngrp = 0;
	linux_gidset = malloc(bsd_gidsetsz * sizeof(*linux_gidset),
	    M_LINUX, M_WAITOK);
	while (ngrp < bsd_gidsetsz) {
		linux_gidset[ngrp] = bsd_gidset[ngrp + 1];
		ngrp++;
	}

	error = copyout(linux_gidset, args->grouplist, ngrp * sizeof(l_gid_t));
	free(linux_gidset, M_LINUX);
	if (error)
		return (error);

	/* Return the number of groups written. */
	td->td_retval[0] = ngrp;
	return (0);
}
1408 
int
linux_setrlimit(struct thread *td, struct linux_setrlimit_args *args)
{
	struct rlimit bsd_rlim;
	struct l_rlimit rlim;
	u_int which;
	int error;

#ifdef DEBUG
	if (ldebug(setrlimit))
		printf(ARGS(setrlimit, "%d, %p"),
		    args->resource, (void *)args->rlim);
#endif

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	/* Map the Linux resource id; -1 marks unsupported resources. */
	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	error = copyin(args->rlim, &rlim, sizeof(rlim));
	if (error)
		return (error);

	/* Widen the Linux limits to the native rlim_t and apply. */
	bsd_rlim.rlim_cur = (rlim_t)rlim.rlim_cur;
	bsd_rlim.rlim_max = (rlim_t)rlim.rlim_max;
	return (kern_setrlimit(td, which, &bsd_rlim));
}
1438 
1439 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
int
linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args)
{
	struct l_rlimit rlim;
	struct rlimit bsd_rlim;
	u_int which;

#ifdef DEBUG
	if (ldebug(old_getrlimit))
		printf(ARGS(old_getrlimit, "%d, %p"),
		    args->resource, (void *)args->rlim);
#endif

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	/* Map the Linux resource id; -1 marks unsupported resources. */
	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	lim_rlimit(td, which, &bsd_rlim);

	/*
	 * The old Linux ABI cannot represent RLIM_INFINITY (all-ones):
	 * clamp a saturated unsigned value down to the signed maximum.
	 */
#ifdef COMPAT_LINUX32
	rlim.rlim_cur = (unsigned int)bsd_rlim.rlim_cur;
	if (rlim.rlim_cur == UINT_MAX)
		rlim.rlim_cur = INT_MAX;
	rlim.rlim_max = (unsigned int)bsd_rlim.rlim_max;
	if (rlim.rlim_max == UINT_MAX)
		rlim.rlim_max = INT_MAX;
#else
	rlim.rlim_cur = (unsigned long)bsd_rlim.rlim_cur;
	if (rlim.rlim_cur == ULONG_MAX)
		rlim.rlim_cur = LONG_MAX;
	rlim.rlim_max = (unsigned long)bsd_rlim.rlim_max;
	if (rlim.rlim_max == ULONG_MAX)
		rlim.rlim_max = LONG_MAX;
#endif
	return (copyout(&rlim, args->rlim, sizeof(rlim)));
}
1479 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */
1480 
int
linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args)
{
	struct l_rlimit rlim;
	struct rlimit bsd_rlim;
	u_int which;

#ifdef DEBUG
	if (ldebug(getrlimit))
		printf(ARGS(getrlimit, "%d, %p"),
		    args->resource, (void *)args->rlim);
#endif

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	/* Map the Linux resource id; -1 marks unsupported resources. */
	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	lim_rlimit(td, which, &bsd_rlim);

	/* Modern ABI: plain truncation to the Linux unsigned long type. */
	rlim.rlim_cur = (l_ulong)bsd_rlim.rlim_cur;
	rlim.rlim_max = (l_ulong)bsd_rlim.rlim_max;
	return (copyout(&rlim, args->rlim, sizeof(rlim)));
}
1507 
int
linux_sched_setscheduler(struct thread *td,
    struct linux_sched_setscheduler_args *args)
{
	struct sched_param sched_param;
	struct thread *tdt;
	int error, policy;

#ifdef DEBUG
	if (ldebug(sched_setscheduler))
		printf(ARGS(sched_setscheduler, "%d, %d, %p"),
		    args->pid, args->policy, (const void *)args->param);
#endif

	/* Translate the Linux scheduling policy to the native one. */
	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}

	error = copyin(args->param, &sched_param, sizeof(sched_param));
	if (error)
		return (error);

	/* linux_tdfind() returns with the target proc locked. */
	tdt = linux_tdfind(td, args->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_setscheduler(td, tdt, policy, &sched_param);
	PROC_UNLOCK(tdt->td_proc);
	return (error);
}
1548 
int
linux_sched_getscheduler(struct thread *td,
    struct linux_sched_getscheduler_args *args)
{
	struct thread *tdt;
	int error, policy;

#ifdef DEBUG
	if (ldebug(sched_getscheduler))
		printf(ARGS(sched_getscheduler, "%d"), args->pid);
#endif

	/* linux_tdfind() returns with the target proc locked. */
	tdt = linux_tdfind(td, args->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_getscheduler(td, tdt, &policy);
	PROC_UNLOCK(tdt->td_proc);

	/* Translate the native policy back to the Linux constant. */
	switch (policy) {
	case SCHED_OTHER:
		td->td_retval[0] = LINUX_SCHED_OTHER;
		break;
	case SCHED_FIFO:
		td->td_retval[0] = LINUX_SCHED_FIFO;
		break;
	case SCHED_RR:
		td->td_retval[0] = LINUX_SCHED_RR;
		break;
	}
	return (error);
}
1581 
1582 int
1583 linux_sched_get_priority_max(struct thread *td,
1584     struct linux_sched_get_priority_max_args *args)
1585 {
1586 	struct sched_get_priority_max_args bsd;
1587 
1588 #ifdef DEBUG
1589 	if (ldebug(sched_get_priority_max))
1590 		printf(ARGS(sched_get_priority_max, "%d"), args->policy);
1591 #endif
1592 
1593 	switch (args->policy) {
1594 	case LINUX_SCHED_OTHER:
1595 		bsd.policy = SCHED_OTHER;
1596 		break;
1597 	case LINUX_SCHED_FIFO:
1598 		bsd.policy = SCHED_FIFO;
1599 		break;
1600 	case LINUX_SCHED_RR:
1601 		bsd.policy = SCHED_RR;
1602 		break;
1603 	default:
1604 		return (EINVAL);
1605 	}
1606 	return (sys_sched_get_priority_max(td, &bsd));
1607 }
1608 
1609 int
1610 linux_sched_get_priority_min(struct thread *td,
1611     struct linux_sched_get_priority_min_args *args)
1612 {
1613 	struct sched_get_priority_min_args bsd;
1614 
1615 #ifdef DEBUG
1616 	if (ldebug(sched_get_priority_min))
1617 		printf(ARGS(sched_get_priority_min, "%d"), args->policy);
1618 #endif
1619 
1620 	switch (args->policy) {
1621 	case LINUX_SCHED_OTHER:
1622 		bsd.policy = SCHED_OTHER;
1623 		break;
1624 	case LINUX_SCHED_FIFO:
1625 		bsd.policy = SCHED_FIFO;
1626 		break;
1627 	case LINUX_SCHED_RR:
1628 		bsd.policy = SCHED_RR;
1629 		break;
1630 	default:
1631 		return (EINVAL);
1632 	}
1633 	return (sys_sched_get_priority_min(td, &bsd));
1634 }
1635 
/*
 * Linux reboot(2) ABI constants: the magic1/magic2 values that must be
 * supplied by the caller, and the accepted command codes.
 */
#define REBOOT_CAD_ON	0x89abcdef
#define REBOOT_CAD_OFF	0
#define REBOOT_HALT	0xcdef0123
#define REBOOT_RESTART	0x01234567
#define REBOOT_RESTART2	0xA1B2C3D4
#define REBOOT_POWEROFF	0x4321FEDC
#define REBOOT_MAGIC1	0xfee1dead
#define REBOOT_MAGIC2	0x28121969
#define REBOOT_MAGIC2A	0x05121996
#define REBOOT_MAGIC2B	0x16041998
1646 
int
linux_reboot(struct thread *td, struct linux_reboot_args *args)
{
	struct reboot_args bsd_args;

#ifdef DEBUG
	if (ldebug(reboot))
		printf(ARGS(reboot, "0x%x"), args->cmd);
#endif

	/* Both magic arguments must match the Linux ABI values. */
	if (args->magic1 != REBOOT_MAGIC1)
		return (EINVAL);

	switch (args->magic2) {
	case REBOOT_MAGIC2:
	case REBOOT_MAGIC2A:
	case REBOOT_MAGIC2B:
		break;
	default:
		return (EINVAL);
	}

	switch (args->cmd) {
	case REBOOT_CAD_ON:
	case REBOOT_CAD_OFF:
		/* Ctrl-Alt-Del toggling: only the privilege check matters. */
		return (priv_check(td, PRIV_REBOOT));
	case REBOOT_HALT:
		bsd_args.opt = RB_HALT;
		break;
	case REBOOT_RESTART:
	case REBOOT_RESTART2:
		bsd_args.opt = 0;
		break;
	case REBOOT_POWEROFF:
		bsd_args.opt = RB_POWEROFF;
		break;
	default:
		return (EINVAL);
	}
	return (sys_reboot(td, &bsd_args));
}
1688 
1689 
1690 /*
1691  * The FreeBSD native getpid(2), getgid(2) and getuid(2) also modify
1692  * td->td_retval[1] when COMPAT_43 is defined. This clobbers registers that
1693  * are assumed to be preserved. The following lightweight syscalls fixes
1694  * this. See also linux_getgid16() and linux_getuid16() in linux_uid16.c
1695  *
1696  * linux_getpid() - MP SAFE
1697  * linux_getgid() - MP SAFE
1698  * linux_getuid() - MP SAFE
1699  */
1700 
/* Return the caller's process id without touching td_retval[1]. */
int
linux_getpid(struct thread *td, struct linux_getpid_args *args)
{

#ifdef DEBUG
	if (ldebug(getpid))
		printf(ARGS(getpid, ""));
#endif
	td->td_retval[0] = td->td_proc->p_pid;

	return (0);
}
1713 
/* Return the Linux thread id stored in the per-thread emuldata. */
int
linux_gettid(struct thread *td, struct linux_gettid_args *args)
{
	struct linux_emuldata *em;

#ifdef DEBUG
	if (ldebug(gettid))
		printf(ARGS(gettid, ""));
#endif

	em = em_find(td);
	KASSERT(em != NULL, ("gettid: emuldata not found.\n"));

	td->td_retval[0] = em->em_tid;

	return (0);
}
1731 
1732 
/* Return the parent process id. */
int
linux_getppid(struct thread *td, struct linux_getppid_args *args)
{

#ifdef DEBUG
	if (ldebug(getppid))
		printf(ARGS(getppid, ""));
#endif

	td->td_retval[0] = kern_getppid(td);
	return (0);
}
1745 
/* Return the real group id without touching td_retval[1]. */
int
linux_getgid(struct thread *td, struct linux_getgid_args *args)
{

#ifdef DEBUG
	if (ldebug(getgid))
		printf(ARGS(getgid, ""));
#endif

	td->td_retval[0] = td->td_ucred->cr_rgid;
	return (0);
}
1758 
/* Return the real user id without touching td_retval[1]. */
int
linux_getuid(struct thread *td, struct linux_getuid_args *args)
{

#ifdef DEBUG
	if (ldebug(getuid))
		printf(ARGS(getuid, ""));
#endif

	td->td_retval[0] = td->td_ucred->cr_ruid;
	return (0);
}
1771 
1772 
1773 int
1774 linux_getsid(struct thread *td, struct linux_getsid_args *args)
1775 {
1776 	struct getsid_args bsd;
1777 
1778 #ifdef DEBUG
1779 	if (ldebug(getsid))
1780 		printf(ARGS(getsid, "%i"), args->pid);
1781 #endif
1782 
1783 	bsd.pid = args->pid;
1784 	return (sys_getsid(td, &bsd));
1785 }
1786 
/* Catch-all handler for Linux syscalls that are not implemented. */
int
linux_nosys(struct thread *td, struct nosys_args *ignore)
{

	return (ENOSYS);
}
1793 
int
linux_getpriority(struct thread *td, struct linux_getpriority_args *args)
{
	struct getpriority_args bsd_args;
	int error;

#ifdef DEBUG
	if (ldebug(getpriority))
		printf(ARGS(getpriority, "%i, %i"), args->which, args->who);
#endif

	bsd_args.which = args->which;
	bsd_args.who = args->who;
	error = sys_getpriority(td, &bsd_args);
	/* Linux returns 20 - nice so the result is never negative. */
	td->td_retval[0] = 20 - td->td_retval[0];
	return (error);
}
1811 
1812 int
1813 linux_sethostname(struct thread *td, struct linux_sethostname_args *args)
1814 {
1815 	int name[2];
1816 
1817 #ifdef DEBUG
1818 	if (ldebug(sethostname))
1819 		printf(ARGS(sethostname, "*, %i"), args->len);
1820 #endif
1821 
1822 	name[0] = CTL_KERN;
1823 	name[1] = KERN_HOSTNAME;
1824 	return (userland_sysctl(td, name, 2, 0, 0, 0, args->hostname,
1825 	    args->len, 0, 0));
1826 }
1827 
1828 int
1829 linux_setdomainname(struct thread *td, struct linux_setdomainname_args *args)
1830 {
1831 	int name[2];
1832 
1833 #ifdef DEBUG
1834 	if (ldebug(setdomainname))
1835 		printf(ARGS(setdomainname, "*, %i"), args->len);
1836 #endif
1837 
1838 	name[0] = CTL_KERN;
1839 	name[1] = KERN_NISDOMAINNAME;
1840 	return (userland_sysctl(td, name, 2, 0, 0, 0, args->name,
1841 	    args->len, 0, 0));
1842 }
1843 
/* Terminate the whole process; this function does not return. */
int
linux_exit_group(struct thread *td, struct linux_exit_group_args *args)
{

#ifdef DEBUG
	if (ldebug(exit_group))
		printf(ARGS(exit_group, "%i"), args->error_code);
#endif

	LINUX_CTR2(exit_group, "thread(%d) (%d)", td->td_tid,
	    args->error_code);

	/*
	 * XXX: we should send a signal to the parent if
	 * SIGNAL_EXIT_GROUP is set. We ignore that (temporarily?)
	 * as it doesnt occur often.
	 */
	exit1(td, args->error_code, 0);
		/* NOTREACHED */
}
1864 
/* Linux capability ABI version magic numbers (see capget(2)). */
#define _LINUX_CAPABILITY_VERSION_1  0x19980330
#define _LINUX_CAPABILITY_VERSION_2  0x20071026
#define _LINUX_CAPABILITY_VERSION_3  0x20080522

/* Header exchanged with user space by capget/capset. */
struct l_user_cap_header {
	l_int	version;	/* one of the version magics above */
	l_int	pid;		/* target process; only 0 is accepted here */
};

/* One 32-bit slice of the capability sets (v2/v3 use two slices). */
struct l_user_cap_data {
	l_int	effective;
	l_int	permitted;
	l_int	inheritable;
};
1879 
int
linux_capget(struct thread *td, struct linux_capget_args *uap)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd[2];
	int error, u32s;

	if (uap->hdrp == NULL)
		return (EFAULT);

	error = copyin(uap->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

	/* The ABI version selects how many 32-bit data slices follow. */
	switch (luch.version) {
	case _LINUX_CAPABILITY_VERSION_1:
		u32s = 1;
		break;
	case _LINUX_CAPABILITY_VERSION_2:
	case _LINUX_CAPABILITY_VERSION_3:
		u32s = 2;
		break;
	default:
#ifdef DEBUG
		if (ldebug(capget))
			printf(LMSG("invalid capget capability version 0x%x"),
			    luch.version);
#endif
		/* Per Linux: report the preferred version, then fail. */
		luch.version = _LINUX_CAPABILITY_VERSION_1;
		error = copyout(&luch, uap->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	/* Only querying the current process is supported. */
	if (luch.pid)
		return (EPERM);

	if (uap->datap) {
		/*
		 * The current implementation doesn't support setting
		 * a capability (it's essentially a stub) so indicate
		 * that no capabilities are currently set or available
		 * to request.
		 */
		memset(&lucd, 0, u32s * sizeof(lucd[0]));
		error = copyout(&lucd, uap->datap, u32s * sizeof(lucd[0]));
	}

	return (error);
}
1931 
int
linux_capset(struct thread *td, struct linux_capset_args *uap)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd[2];
	int error, i, u32s;

	if (uap->hdrp == NULL || uap->datap == NULL)
		return (EFAULT);

	error = copyin(uap->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

	/* The ABI version selects how many 32-bit data slices follow. */
	switch (luch.version) {
	case _LINUX_CAPABILITY_VERSION_1:
		u32s = 1;
		break;
	case _LINUX_CAPABILITY_VERSION_2:
	case _LINUX_CAPABILITY_VERSION_3:
		u32s = 2;
		break;
	default:
#ifdef DEBUG
		if (ldebug(capset))
			printf(LMSG("invalid capset capability version 0x%x"),
			    luch.version);
#endif
		/* Per Linux: report the preferred version, then fail. */
		luch.version = _LINUX_CAPABILITY_VERSION_1;
		error = copyout(&luch, uap->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	/* Only the current process may be targeted. */
	if (luch.pid)
		return (EPERM);

	error = copyin(uap->datap, &lucd, u32s * sizeof(lucd[0]));
	if (error != 0)
		return (error);

	/* We currently don't support setting any capabilities. */
	for (i = 0; i < u32s; i++) {
		if (lucd[i].effective || lucd[i].permitted ||
		    lucd[i].inheritable) {
			linux_msg(td,
			    "capset[%d] effective=0x%x, permitted=0x%x, "
			    "inheritable=0x%x is not implemented", i,
			    (int)lucd[i].effective, (int)lucd[i].permitted,
			    (int)lucd[i].inheritable);
			return (EPERM);
		}
	}

	return (0);
}
1989 
int
linux_prctl(struct thread *td, struct linux_prctl_args *args)
{
	int error = 0, max_size;
	struct proc *p = td->td_proc;
	char comm[LINUX_MAX_COMM_LEN];
	struct linux_emuldata *em;
	int pdeath_signal;

#ifdef DEBUG
	if (ldebug(prctl))
		printf(ARGS(prctl, "%d, %ju, %ju, %ju, %ju"), args->option,
		    (uintmax_t)args->arg2, (uintmax_t)args->arg3,
		    (uintmax_t)args->arg4, (uintmax_t)args->arg5);
#endif

	/* Dispatch on the prctl(2) option; unsupported options are EINVAL. */
	switch (args->option) {
	case LINUX_PR_SET_PDEATHSIG:
		/* Record the parent-death signal in the thread emuldata. */
		if (!LINUX_SIG_VALID(args->arg2))
			return (EINVAL);
		em = em_find(td);
		KASSERT(em != NULL, ("prctl: emuldata not found.\n"));
		em->pdeath_signal = args->arg2;
		break;
	case LINUX_PR_GET_PDEATHSIG:
		/* arg2 is a user pointer the signal number is written to. */
		em = em_find(td);
		KASSERT(em != NULL, ("prctl: emuldata not found.\n"));
		pdeath_signal = em->pdeath_signal;
		error = copyout(&pdeath_signal,
		    (void *)(register_t)args->arg2,
		    sizeof(pdeath_signal));
		break;
	case LINUX_PR_GET_KEEPCAPS:
		/*
		 * Indicate that we always clear the effective and
		 * permitted capability sets when the user id becomes
		 * non-zero (actually the capability sets are simply
		 * always zero in the current implementation).
		 */
		td->td_retval[0] = 0;
		break;
	case LINUX_PR_SET_KEEPCAPS:
		/*
		 * Ignore requests to keep the effective and permitted
		 * capability sets when the user id becomes non-zero.
		 */
		break;
	case LINUX_PR_SET_NAME:
		/*
		 * To be on the safe side we need to make sure to not
		 * overflow the size a Linux program expects. We already
		 * do this here in the copyin, so that we don't need to
		 * check on copyout.
		 */
		max_size = MIN(sizeof(comm), sizeof(p->p_comm));
		error = copyinstr((void *)(register_t)args->arg2, comm,
		    max_size, NULL);

		/* Linux silently truncates the name if it is too long. */
		if (error == ENAMETOOLONG) {
			/*
			 * XXX: copyinstr() isn't documented to populate the
			 * array completely, so do a copyin() to be on the
			 * safe side. This should be changed in case
			 * copyinstr() is changed to guarantee this.
			 */
			error = copyin((void *)(register_t)args->arg2, comm,
			    max_size - 1);
			comm[max_size - 1] = '\0';
		}
		if (error)
			return (error);

		PROC_LOCK(p);
		strlcpy(p->p_comm, comm, sizeof(p->p_comm));
		PROC_UNLOCK(p);
		break;
	case LINUX_PR_GET_NAME:
		/* Copy the process name out, including the terminator. */
		PROC_LOCK(p);
		strlcpy(comm, p->p_comm, sizeof(comm));
		PROC_UNLOCK(p);
		error = copyout(comm, (void *)(register_t)args->arg2,
		    strlen(comm) + 1);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
2081 
int
linux_sched_setparam(struct thread *td,
    struct linux_sched_setparam_args *uap)
{
	struct sched_param sched_param;
	struct thread *tdt;
	int error;

#ifdef DEBUG
	if (ldebug(sched_setparam))
		printf(ARGS(sched_setparam, "%d, *"), uap->pid);
#endif

	error = copyin(uap->param, &sched_param, sizeof(sched_param));
	if (error)
		return (error);

	/* linux_tdfind() returns with the target proc locked. */
	tdt = linux_tdfind(td, uap->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_setparam(td, tdt, &sched_param);
	PROC_UNLOCK(tdt->td_proc);
	return (error);
}
2107 
int
linux_sched_getparam(struct thread *td,
    struct linux_sched_getparam_args *uap)
{
	struct sched_param sched_param;
	struct thread *tdt;
	int error;

#ifdef DEBUG
	if (ldebug(sched_getparam))
		printf(ARGS(sched_getparam, "%d, *"), uap->pid);
#endif

	/* linux_tdfind() returns with the target proc locked. */
	tdt = linux_tdfind(td, uap->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_getparam(td, tdt, &sched_param);
	PROC_UNLOCK(tdt->td_proc);
	/* Copy out only after the lock is dropped and on success. */
	if (error == 0)
		error = copyout(&sched_param, uap->param,
		    sizeof(sched_param));
	return (error);
}
2132 
2133 /*
2134  * Get affinity of a process.
2135  */
int
linux_sched_getaffinity(struct thread *td,
    struct linux_sched_getaffinity_args *args)
{
	int error;
	struct thread *tdt;

#ifdef DEBUG
	if (ldebug(sched_getaffinity))
		printf(ARGS(sched_getaffinity, "%d, %d, *"), args->pid,
		    args->len);
#endif
	/* The user buffer must hold at least a full native cpuset_t. */
	if (args->len < sizeof(cpuset_t))
		return (EINVAL);

	/* linux_tdfind() returns with the target proc locked. */
	tdt = linux_tdfind(td, args->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	PROC_UNLOCK(tdt->td_proc);

	/*
	 * NOTE(review): tdt->td_tid is read after the proc lock is
	 * dropped — assumes the thread cannot exit here; verify.
	 */
	error = kern_cpuset_getaffinity(td, CPU_LEVEL_WHICH, CPU_WHICH_TID,
	    tdt->td_tid, sizeof(cpuset_t), (cpuset_t *)args->user_mask_ptr);
	if (error == 0)
		td->td_retval[0] = sizeof(cpuset_t);

	return (error);
}
2164 
2165 /*
2166  *  Set affinity of a process.
2167  */
int
linux_sched_setaffinity(struct thread *td,
    struct linux_sched_setaffinity_args *args)
{
	struct thread *tdt;

#ifdef DEBUG
	if (ldebug(sched_setaffinity))
		printf(ARGS(sched_setaffinity, "%d, %d, *"), args->pid,
		    args->len);
#endif
	/* The user buffer must hold at least a full native cpuset_t. */
	if (args->len < sizeof(cpuset_t))
		return (EINVAL);

	/* linux_tdfind() returns with the target proc locked. */
	tdt = linux_tdfind(td, args->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	PROC_UNLOCK(tdt->td_proc);

	/*
	 * NOTE(review): tdt->td_tid is read after the proc lock is
	 * dropped — assumes the thread cannot exit here; verify.
	 */
	return (kern_cpuset_setaffinity(td, CPU_LEVEL_WHICH, CPU_WHICH_TID,
	    tdt->td_tid, sizeof(cpuset_t), (cpuset_t *) args->user_mask_ptr));
}
2191 
/* Linux prlimit64(2) resource-limit record: always unsigned 64-bit. */
struct linux_rlimit64 {
	uint64_t	rlim_cur;	/* soft limit */
	uint64_t	rlim_max;	/* hard limit */
};
2196 
int
linux_prlimit64(struct thread *td, struct linux_prlimit64_args *args)
{
	struct rlimit rlim, nrlim;
	struct linux_rlimit64 lrlim;
	struct proc *p;
	u_int which;
	int flags;
	int error;

#ifdef DEBUG
	if (ldebug(prlimit64))
		printf(ARGS(prlimit64, "%d, %d, %p, %p"), args->pid,
		    args->resource, (void *)args->new, (void *)args->old);
#endif

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	/* Map the Linux resource id; -1 marks unsupported resources. */
	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	if (args->new != NULL) {
		/*
		 * Note. Unlike FreeBSD where rlim is signed 64-bit Linux
		 * rlim is unsigned 64-bit. FreeBSD treats negative limits
		 * as INFINITY so we do not need a conversion even.
		 */
		error = copyin(args->new, &nrlim, sizeof(nrlim));
		if (error != 0)
			return (error);
	}

	/* Setting limits needs debug rights; reading only visibility. */
	flags = PGET_HOLD | PGET_NOTWEXIT;
	if (args->new != NULL)
		flags |= PGET_CANDEBUG;
	else
		flags |= PGET_CANSEE;
	error = pget(args->pid, flags, &p);
	if (error != 0)
		return (error);

	if (args->old != NULL) {
		PROC_LOCK(p);
		lim_rlimit_proc(p, which, &rlim);
		PROC_UNLOCK(p);
		/* Translate RLIM_INFINITY to the Linux infinity value. */
		if (rlim.rlim_cur == RLIM_INFINITY)
			lrlim.rlim_cur = LINUX_RLIM_INFINITY;
		else
			lrlim.rlim_cur = rlim.rlim_cur;
		if (rlim.rlim_max == RLIM_INFINITY)
			lrlim.rlim_max = LINUX_RLIM_INFINITY;
		else
			lrlim.rlim_max = rlim.rlim_max;
		error = copyout(&lrlim, args->old, sizeof(lrlim));
		if (error != 0)
			goto out;
	}

	if (args->new != NULL)
		error = kern_proc_setrlimit(td, p, which, &nrlim);

 out:
	/* Drop the hold acquired by pget(). */
	PRELE(p);
	return (error);
}
2264 
int
linux_pselect6(struct thread *td, struct linux_pselect6_args *args)
{
	struct timeval utv, tv0, tv1, *tvp;
	struct l_pselect6arg lpse6;
	struct l_timespec lts;
	struct timespec uts;
	l_sigset_t l_ss;
	sigset_t *ssp;
	sigset_t ss;
	int error;

	/*
	 * The sixth argument is a pointer to a {sigset, size} pair, not
	 * the sigset itself; unpack and convert it if present.
	 */
	ssp = NULL;
	if (args->sig != NULL) {
		error = copyin(args->sig, &lpse6, sizeof(lpse6));
		if (error != 0)
			return (error);
		if (lpse6.ss_len != sizeof(l_ss))
			return (EINVAL);
		if (lpse6.ss != 0) {
			error = copyin(PTRIN(lpse6.ss), &l_ss,
			    sizeof(l_ss));
			if (error != 0)
				return (error);
			linux_to_bsd_sigset(&l_ss, &ss);
			ssp = &ss;
		}
	}

	/*
	 * Currently glibc changes nanosecond number to microsecond.
	 * This mean losing precision but for now it is hardly seen.
	 */
	if (args->tsp != NULL) {
		error = copyin(args->tsp, &lts, sizeof(lts));
		if (error != 0)
			return (error);
		error = linux_to_native_timespec(&uts, &lts);
		if (error != 0)
			return (error);

		TIMESPEC_TO_TIMEVAL(&utv, &uts);
		if (itimerfix(&utv))
			return (EINVAL);

		/* Remember when we started, to compute the remainder. */
		microtime(&tv0);
		tvp = &utv;
	} else
		tvp = NULL;

	error = kern_pselect(td, args->nfds, args->readfds, args->writefds,
	    args->exceptfds, tvp, ssp, LINUX_NFDBITS);

	/* Linux updates the timeout with the time left unslept. */
	if (error == 0 && args->tsp != NULL) {
		if (td->td_retval[0] != 0) {
			/*
			 * Compute how much time was left of the timeout,
			 * by subtracting the current time and the time
			 * before we started the call, and subtracting
			 * that result from the user-supplied value.
			 */

			microtime(&tv1);
			timevalsub(&tv1, &tv0);
			timevalsub(&utv, &tv1);
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		} else
			timevalclear(&utv);

		TIMEVAL_TO_TIMESPEC(&utv, &uts);

		error = native_to_linux_timespec(&lts, &uts);
		if (error == 0)
			error = copyout(&lts, args->tsp, sizeof(lts));
	}

	return (error);
}
2344 
int
linux_ppoll(struct thread *td, struct linux_ppoll_args *args)
{
	struct timespec ts0, ts1;
	struct l_timespec lts;
	struct timespec uts, *tsp;
	l_sigset_t l_ss;
	sigset_t *ssp;
	sigset_t ss;
	int error;

	/*
	 * If a signal mask was supplied, it must be exactly the Linux
	 * sigset size; convert it to the native representation.
	 */
	if (args->sset != NULL) {
		if (args->ssize != sizeof(l_ss))
			return (EINVAL);
		error = copyin(args->sset, &l_ss, sizeof(l_ss));
		if (error)
			return (error);
		linux_to_bsd_sigset(&l_ss, &ss);
		ssp = &ss;
	} else
		ssp = NULL;
	/*
	 * Copy in and convert the timeout, and record the start time so
	 * the remaining time can be computed on the way out.
	 */
	if (args->tsp != NULL) {
		error = copyin(args->tsp, &lts, sizeof(lts));
		if (error)
			return (error);
		error = linux_to_native_timespec(&uts, &lts);
		if (error != 0)
			return (error);

		nanotime(&ts0);
		tsp = &uts;
	} else
		tsp = NULL;

	error = kern_poll(td, args->fds, args->nfds, tsp, ssp);

	/*
	 * Like Linux, write the unslept portion of the timeout back to
	 * userland.  If no descriptor became ready, the whole timeout
	 * was consumed, so the remaining time is zero.
	 */
	if (error == 0 && args->tsp != NULL) {
		if (td->td_retval[0]) {
			nanotime(&ts1);
			timespecsub(&ts1, &ts0, &ts1);	/* elapsed time */
			timespecsub(&uts, &ts1, &uts);	/* remaining time */
			if (uts.tv_sec < 0)
				timespecclear(&uts);
		} else
			timespecclear(&uts);

		error = native_to_linux_timespec(&lts, &uts);
		if (error == 0)
			error = copyout(&lts, args->tsp, sizeof(lts));
	}

	return (error);
}
2398 
2399 #if defined(DEBUG) || defined(KTR)
2400 /* XXX: can be removed when every ldebug(...) and KTR stuff are removed. */
2401 
2402 #ifdef COMPAT_LINUX32
2403 #define	L_MAXSYSCALL	LINUX32_SYS_MAXSYSCALL
2404 #else
2405 #define	L_MAXSYSCALL	LINUX_SYS_MAXSYSCALL
2406 #endif
2407 
2408 u_char linux_debug_map[howmany(L_MAXSYSCALL, sizeof(u_char))];
2409 
2410 static int
2411 linux_debug(int syscall, int toggle, int global)
2412 {
2413 
2414 	if (global) {
2415 		char c = toggle ? 0 : 0xff;
2416 
2417 		memset(linux_debug_map, c, sizeof(linux_debug_map));
2418 		return (0);
2419 	}
2420 	if (syscall < 0 || syscall >= L_MAXSYSCALL)
2421 		return (EINVAL);
2422 	if (toggle)
2423 		clrbit(linux_debug_map, syscall);
2424 	else
2425 		setbit(linux_debug_map, syscall);
2426 	return (0);
2427 }
2428 #undef L_MAXSYSCALL
2429 
2430 /*
2431  * Usage: sysctl linux.debug=<syscall_nr>.<0/1>
2432  *
2433  *    E.g.: sysctl linux.debug=21.0
2434  *
2435  * As a special case, syscall "all" will apply to all syscalls globally.
2436  */
2437 #define LINUX_MAX_DEBUGSTR	16
2438 int
2439 linux_sysctl_debug(SYSCTL_HANDLER_ARGS)
2440 {
2441 	char value[LINUX_MAX_DEBUGSTR], *p;
2442 	int error, sysc, toggle;
2443 	int global = 0;
2444 
2445 	value[0] = '\0';
2446 	error = sysctl_handle_string(oidp, value, LINUX_MAX_DEBUGSTR, req);
2447 	if (error || req->newptr == NULL)
2448 		return (error);
2449 	for (p = value; *p != '\0' && *p != '.'; p++);
2450 	if (*p == '\0')
2451 		return (EINVAL);
2452 	*p++ = '\0';
2453 	sysc = strtol(value, NULL, 0);
2454 	toggle = strtol(p, NULL, 0);
2455 	if (strcmp(value, "all") == 0)
2456 		global = 1;
2457 	error = linux_debug(sysc, toggle, global);
2458 	return (error);
2459 }
2460 
2461 #endif /* DEBUG || KTR */
2462 
2463 int
2464 linux_sched_rr_get_interval(struct thread *td,
2465     struct linux_sched_rr_get_interval_args *uap)
2466 {
2467 	struct timespec ts;
2468 	struct l_timespec lts;
2469 	struct thread *tdt;
2470 	int error;
2471 
2472 	/*
2473 	 * According to man in case the invalid pid specified
2474 	 * EINVAL should be returned.
2475 	 */
2476 	if (uap->pid < 0)
2477 		return (EINVAL);
2478 
2479 	tdt = linux_tdfind(td, uap->pid, -1);
2480 	if (tdt == NULL)
2481 		return (ESRCH);
2482 
2483 	error = kern_sched_rr_get_interval_td(td, tdt, &ts);
2484 	PROC_UNLOCK(tdt->td_proc);
2485 	if (error != 0)
2486 		return (error);
2487 	error = native_to_linux_timespec(&lts, &ts);
2488 	if (error != 0)
2489 		return (error);
2490 	return (copyout(&lts, uap->interval, sizeof(lts)));
2491 }
2492 
2493 /*
2494  * In case when the Linux thread is the initial thread in
2495  * the thread group thread id is equal to the process id.
2496  * Glibc depends on this magic (assert in pthread_getattr_np.c).
2497  */
struct thread *
linux_tdfind(struct thread *td, lwpid_t tid, pid_t pid)
{
	struct linux_emuldata *em;
	struct thread *tdt;
	struct proc *p;

	/*
	 * On success the thread is returned with its process locked;
	 * the caller is responsible for the PROC_UNLOCK().
	 */
	tdt = NULL;
	if (tid == 0 || tid == td->td_tid) {
		/* The caller is looking up itself; no search needed. */
		tdt = td;
		PROC_LOCK(tdt->td_proc);
	} else if (tid > PID_MAX)
		/* Tids above PID_MAX belong to non-initial threads. */
		tdt = tdfind(tid, pid);
	else {
		/*
		 * Initial thread, where the tid is equal to the pid.
		 */
		p = pfind(tid);
		if (p != NULL) {
			if (SV_PROC_ABI(p) != SV_ABI_LINUX) {
				/*
				 * p is not a Linuxulator process.
				 */
				PROC_UNLOCK(p);
				return (NULL);
			}
			/*
			 * Find the thread whose emulator data carries the
			 * requested tid.
			 * NOTE(review): em is dereferenced without a NULL
			 * check; presumably every thread of a Linux-ABI
			 * process has emuldata — confirm.
			 */
			FOREACH_THREAD_IN_PROC(p, tdt) {
				em = em_find(tdt);
				if (tid == em->em_tid)
					return (tdt);
			}
			PROC_UNLOCK(p);
		}
		return (NULL);
	}

	return (tdt);
}
2536 
2537 void
2538 linux_to_bsd_waitopts(int options, int *bsdopts)
2539 {
2540 
2541 	if (options & LINUX_WNOHANG)
2542 		*bsdopts |= WNOHANG;
2543 	if (options & LINUX_WUNTRACED)
2544 		*bsdopts |= WUNTRACED;
2545 	if (options & LINUX_WEXITED)
2546 		*bsdopts |= WEXITED;
2547 	if (options & LINUX_WCONTINUED)
2548 		*bsdopts |= WCONTINUED;
2549 	if (options & LINUX_WNOWAIT)
2550 		*bsdopts |= WNOWAIT;
2551 
2552 	if (options & __WCLONE)
2553 		*bsdopts |= WLINUXCLONE;
2554 }
2555 
2556 int
2557 linux_getrandom(struct thread *td, struct linux_getrandom_args *args)
2558 {
2559 	struct uio uio;
2560 	struct iovec iov;
2561 	int error;
2562 
2563 	if (args->flags & ~(LINUX_GRND_NONBLOCK|LINUX_GRND_RANDOM))
2564 		return (EINVAL);
2565 	if (args->count > INT_MAX)
2566 		args->count = INT_MAX;
2567 
2568 	iov.iov_base = args->buf;
2569 	iov.iov_len = args->count;
2570 
2571 	uio.uio_iov = &iov;
2572 	uio.uio_iovcnt = 1;
2573 	uio.uio_resid = iov.iov_len;
2574 	uio.uio_segflg = UIO_USERSPACE;
2575 	uio.uio_rw = UIO_READ;
2576 	uio.uio_td = td;
2577 
2578 	error = read_random_uio(&uio, args->flags & LINUX_GRND_NONBLOCK);
2579 	if (error == 0)
2580 		td->td_retval[0] = args->count - uio.uio_resid;
2581 	return (error);
2582 }
2583 
2584 int
2585 linux_mincore(struct thread *td, struct linux_mincore_args *args)
2586 {
2587 
2588 	/* Needs to be page-aligned */
2589 	if (args->start & PAGE_MASK)
2590 		return (EINVAL);
2591 	return (kern_mincore(td, args->start, args->len, args->vec));
2592 }
2593