xref: /freebsd/sys/amd64/amd64/sys_machdep.c (revision 8a0a413e)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)sys_machdep.c	5.5 (Berkeley) 1/19/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysproto.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>		/* for kernel_map */
#include <vm/vm_extern.h>

#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/specialreg.h>
#include <machine/sysarch.h>
#include <machine/tss.h>
#include <machine/vmparam.h>

#include <security/audit/audit.h>

#define	MAX_LD		8192

int max_ldt_segment = 512;
SYSCTL_INT(_machdep, OID_AUTO, max_ldt_segment, CTLFLAG_RDTUN,
    &max_ldt_segment, 0,
    "Maximum number of allowed LDT segments in a single address space");

static void
max_ldt_segment_init(void *arg __unused)
{

	if (max_ldt_segment <= 0)
		max_ldt_segment = 1;
	if (max_ldt_segment > MAX_LD)
		max_ldt_segment = MAX_LD;
}
SYSINIT(maxldt, SI_SUB_VM_CONF, SI_ORDER_ANY, max_ldt_segment_init, NULL);

static void user_ldt_derefl(struct proc_ldt *pldt);

#ifndef _SYS_SYSPROTO_H_
struct sysarch_args {
	int op;
	char *parms;
};
#endif

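/*
 * Handle the I386_GET_LDT and I386_SET_LDT sysarch(2) operations.  The
 * i386_ldt_args structure is copied in from user space when uap_space is
 * UIO_USERSPACE; otherwise uap->parms is taken to already point at a
 * kernel copy.
 */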
int
sysarch_ldt(struct thread *td, struct sysarch_args *uap, int uap_space)
{
	struct i386_ldt_args *largs, la;
	struct user_segment_descriptor *lp;
	int error = 0;

	/*
	 * XXXKIB check that the BSM generation code knows to encode
	 * the op argument.
	 */
	AUDIT_ARG_CMD(uap->op);
	if (uap_space == UIO_USERSPACE) {
		error = copyin(uap->parms, &la, sizeof(struct i386_ldt_args));
		if (error != 0)
			return (error);
		largs = &la;
	} else
		largs = (struct i386_ldt_args *)uap->parms;

	switch (uap->op) {
	case I386_GET_LDT:
		error = amd64_get_ldt(td, largs);
		break;
	case I386_SET_LDT:
		if (largs->descs != NULL && largs->num > max_ldt_segment)
			return (EINVAL);
		set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
		if (largs->descs != NULL) {
			lp = malloc(largs->num * sizeof(struct
			    user_segment_descriptor), M_TEMP, M_WAITOK);
			error = copyin(largs->descs, lp, largs->num *
			    sizeof(struct user_segment_descriptor));
			if (error == 0)
				error = amd64_set_ldt(td, largs, lp);
			free(lp, M_TEMP);
		} else {
			error = amd64_set_ldt(td, largs, NULL);
		}
		break;
	}
	return (error);
}

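/*
 * Rewrite the base of the 32-bit user %gs descriptor in the current
 * CPU's GDT.  No-op unless td is curthread.
 */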
void
update_gdt_gsbase(struct thread *td, uint32_t base)
{
	struct user_segment_descriptor *sd;

	if (td != curthread)
		return;
	set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
	critical_enter();
	sd = PCPU_GET(gs32p);
	sd->sd_lobase = base & 0xffffff;
	sd->sd_hibase = (base >> 24) & 0xff;
	critical_exit();
}

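/*
 * Rewrite the base of the 32-bit user %fs descriptor in the current
 * CPU's GDT.  No-op unless td is curthread.
 */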
void
update_gdt_fsbase(struct thread *td, uint32_t base)
{
	struct user_segment_descriptor *sd;

	if (td != curthread)
		return;
	set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
	critical_enter();
	sd = PCPU_GET(fs32p);
	sd->sd_lobase = base & 0xffffff;
	sd->sd_hibase = (base >> 24) & 0xff;
	critical_exit();
}

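/*
 * sysarch(2): machine-dependent operations.  LDT requests are forwarded
 * to sysarch_ldt(); the remaining operations copy their argument
 * structure in, perform the operation, and copy results back out where
 * required.
 */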
int
sysarch(struct thread *td, struct sysarch_args *uap)
{
	int error = 0;
	struct pcb *pcb = curthread->td_pcb;
	uint32_t i386base;
	uint64_t a64base;
	struct i386_ioperm_args iargs;
	struct i386_get_xfpustate i386xfpu;
	struct amd64_get_xfpustate a64xfpu;

#ifdef CAPABILITY_MODE
	/*
	 * When adding new operations, add a new case statement here to
	 * explicitly indicate whether or not the operation is safe to
	 * perform in capability mode.
	 */
	if (IN_CAPABILITY_MODE(td)) {
		switch (uap->op) {
		case I386_GET_LDT:
		case I386_SET_LDT:
		case I386_GET_IOPERM:
		case I386_GET_FSBASE:
		case I386_SET_FSBASE:
		case I386_GET_GSBASE:
		case I386_SET_GSBASE:
		case I386_GET_XFPUSTATE:
		case AMD64_GET_FSBASE:
		case AMD64_SET_FSBASE:
		case AMD64_GET_GSBASE:
		case AMD64_SET_GSBASE:
		case AMD64_GET_XFPUSTATE:
			break;

		case I386_SET_IOPERM:
		default:
#ifdef KTRACE
			if (KTRPOINT(td, KTR_CAPFAIL))
				ktrcapfail(CAPFAIL_SYSCALL, NULL, NULL);
#endif
			return (ECAPMODE);
		}
	}
#endif

	if (uap->op == I386_GET_LDT || uap->op == I386_SET_LDT)
		return (sysarch_ldt(td, uap, UIO_USERSPACE));
	/*
	 * XXXKIB check that the BSM generation code knows to encode
	 * the op argument.
	 */
	AUDIT_ARG_CMD(uap->op);
	switch (uap->op) {
	case I386_GET_IOPERM:
	case I386_SET_IOPERM:
		if ((error = copyin(uap->parms, &iargs,
		    sizeof(struct i386_ioperm_args))) != 0)
			return (error);
		break;
	case I386_GET_XFPUSTATE:
		if ((error = copyin(uap->parms, &i386xfpu,
		    sizeof(struct i386_get_xfpustate))) != 0)
			return (error);
		a64xfpu.addr = (void *)(uintptr_t)i386xfpu.addr;
		a64xfpu.len = i386xfpu.len;
		break;
	case AMD64_GET_XFPUSTATE:
		if ((error = copyin(uap->parms, &a64xfpu,
		    sizeof(struct amd64_get_xfpustate))) != 0)
			return (error);
		break;
	default:
		break;
	}

	switch (uap->op) {
	case I386_GET_IOPERM:
		error = amd64_get_ioperm(td, &iargs);
		if (error == 0)
			error = copyout(&iargs, uap->parms,
			    sizeof(struct i386_ioperm_args));
		break;
	case I386_SET_IOPERM:
		error = amd64_set_ioperm(td, &iargs);
		break;
	case I386_GET_FSBASE:
		update_pcb_bases(pcb);
		i386base = pcb->pcb_fsbase;
		error = copyout(&i386base, uap->parms, sizeof(i386base));
		break;
	case I386_SET_FSBASE:
		error = copyin(uap->parms, &i386base, sizeof(i386base));
		if (!error) {
			set_pcb_flags(pcb, PCB_FULL_IRET);
			pcb->pcb_fsbase = i386base;
			td->td_frame->tf_fs = _ufssel;
			update_gdt_fsbase(td, i386base);
		}
		break;
	case I386_GET_GSBASE:
		update_pcb_bases(pcb);
		i386base = pcb->pcb_gsbase;
		error = copyout(&i386base, uap->parms, sizeof(i386base));
		break;
	case I386_SET_GSBASE:
		error = copyin(uap->parms, &i386base, sizeof(i386base));
		if (!error) {
			set_pcb_flags(pcb, PCB_FULL_IRET);
			pcb->pcb_gsbase = i386base;
			td->td_frame->tf_gs = _ugssel;
			update_gdt_gsbase(td, i386base);
		}
		break;
	case AMD64_GET_FSBASE:
		update_pcb_bases(pcb);
		error = copyout(&pcb->pcb_fsbase, uap->parms,
		    sizeof(pcb->pcb_fsbase));
		break;

	case AMD64_SET_FSBASE:
		error = copyin(uap->parms, &a64base, sizeof(a64base));
		if (!error) {
			if (a64base < VM_MAXUSER_ADDRESS) {
				set_pcb_flags(pcb, PCB_FULL_IRET);
				pcb->pcb_fsbase = a64base;
				td->td_frame->tf_fs = _ufssel;
			} else
				error = EINVAL;
		}
		break;

	case AMD64_GET_GSBASE:
		update_pcb_bases(pcb);
		error = copyout(&pcb->pcb_gsbase, uap->parms,
		    sizeof(pcb->pcb_gsbase));
		break;

	case AMD64_SET_GSBASE:
		error = copyin(uap->parms, &a64base, sizeof(a64base));
		if (!error) {
			if (a64base < VM_MAXUSER_ADDRESS) {
				set_pcb_flags(pcb, PCB_FULL_IRET);
				pcb->pcb_gsbase = a64base;
				td->td_frame->tf_gs = _ugssel;
			} else
				error = EINVAL;
		}
		break;

	case I386_GET_XFPUSTATE:
	case AMD64_GET_XFPUSTATE:
		if (a64xfpu.len > cpu_max_ext_state_size -
		    sizeof(struct savefpu))
			return (EINVAL);
		fpugetregs(td);
		error = copyout((char *)(get_pcb_user_save_td(td) + 1),
		    a64xfpu.addr, a64xfpu.len);
		break;

	default:
		error = EINVAL;
		break;
	}
	return (error);
}

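/*
 * Grant or revoke user access to a range of I/O ports by clearing or
 * setting bits in the TSS I/O permission bitmap.  A private TSS with an
 * appended bitmap (initially all ones, i.e. all access denied) is
 * allocated for the process on first use and loaded in place of the
 * per-CPU common TSS.
 */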
int
amd64_set_ioperm(struct thread *td, struct i386_ioperm_args *uap)
{
	char *iomap;
	struct amd64tss *tssp;
	struct system_segment_descriptor *tss_sd;
	struct pcb *pcb;
	u_int i;
	int error;

	if ((error = priv_check(td, PRIV_IO)) != 0)
		return (error);
	if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
		return (error);
	if (uap->start > uap->start + uap->length ||
	    uap->start + uap->length > IOPAGES * PAGE_SIZE * NBBY)
		return (EINVAL);

	/*
	 * XXX
	 * While this is restricted to root, we should probably figure out
	 * whether any other driver is using this i/o address so as not to
	 * cause confusion.  This probably requires a global 'usage registry'.
	 */
	pcb = td->td_pcb;
	if (pcb->pcb_tssp == NULL) {
		tssp = (struct amd64tss *)kmem_malloc(kernel_arena,
		    ctob(IOPAGES+1), M_WAITOK);
		iomap = (char *)&tssp[1];
		memset(iomap, 0xff, IOPERM_BITMAP_SIZE);
		critical_enter();
		/* Takes care of tss_rsp0. */
		memcpy(tssp, &common_tss[PCPU_GET(cpuid)],
		    sizeof(struct amd64tss));
		tssp->tss_iobase = sizeof(*tssp);
		pcb->pcb_tssp = tssp;
		tss_sd = PCPU_GET(tss);
		tss_sd->sd_lobase = (u_long)tssp & 0xffffff;
		tss_sd->sd_hibase = ((u_long)tssp >> 24) & 0xfffffffffful;
		tss_sd->sd_type = SDT_SYSTSS;
		ltr(GSEL(GPROC0_SEL, SEL_KPL));
		PCPU_SET(tssp, tssp);
		critical_exit();
	} else
		iomap = (char *)&pcb->pcb_tssp[1];
	for (i = uap->start; i < uap->start + uap->length; i++) {
		if (uap->enable)
			iomap[i >> 3] &= ~(1 << (i & 7));
		else
			iomap[i >> 3] |= (1 << (i & 7));
	}
	return (error);
}

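/*
 * Report the I/O permission state starting at uap->start: 'enable'
 * reflects the first port and 'length' counts how many consecutive
 * ports share that state.  A process without a private TSS has no
 * bitmap, so a zero length is returned.
 */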
int
amd64_get_ioperm(struct thread *td, struct i386_ioperm_args *uap)
{
	int i, state;
	char *iomap;

	if (uap->start >= IOPAGES * PAGE_SIZE * NBBY)
		return (EINVAL);
	if (td->td_pcb->pcb_tssp == NULL) {
		uap->length = 0;
		goto done;
	}

	iomap = (char *)&td->td_pcb->pcb_tssp[1];

	i = uap->start;
	state = (iomap[i >> 3] >> (i & 7)) & 1;
	uap->enable = !state;
	uap->length = 1;

	for (i = uap->start + 1; i < IOPAGES * PAGE_SIZE * NBBY; i++) {
		if (state != ((iomap[i >> 3] >> (i & 7)) & 1))
			break;
		uap->length++;
	}

done:
	return (0);
}

/*
 * Update the GDT entry pointing to the LDT to point to the LDT of the
 * current process.
 */
static void
set_user_ldt(struct mdproc *mdp)
{

	*PCPU_GET(ldt) = mdp->md_ldt_sd;
	lldt(GSEL(GUSERLDT_SEL, SEL_KPL));
}

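/*
 * smp_rendezvous() callback: load the new LDT on CPUs that are currently
 * running a thread belonging to the target process's vmspace.
 */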
static void
set_user_ldt_rv(struct vmspace *vmsp)
{
	struct thread *td;

	td = curthread;
	if (vmsp != td->td_proc->p_vmspace)
		return;

	set_user_ldt(&td->td_proc->p_md);
}

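/*
 * Allocate (or, unless 'force' is set, reuse) the per-process LDT.
 * Called with dt_lock held; the lock is dropped around the memory
 * allocations and re-taken before the new table is installed, so
 * md_ldt is re-checked afterwards.
 */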
struct proc_ldt *
user_ldt_alloc(struct proc *p, int force)
{
	struct proc_ldt *pldt, *new_ldt;
	struct mdproc *mdp;
	struct soft_segment_descriptor sldt;

	mtx_assert(&dt_lock, MA_OWNED);
	mdp = &p->p_md;
	if (!force && mdp->md_ldt != NULL)
		return (mdp->md_ldt);
	mtx_unlock(&dt_lock);
	new_ldt = malloc(sizeof(struct proc_ldt), M_SUBPROC, M_WAITOK);
	new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_arena,
	     max_ldt_segment * sizeof(struct user_segment_descriptor),
	     M_WAITOK | M_ZERO);
	new_ldt->ldt_refcnt = 1;
	sldt.ssd_base = (uint64_t)new_ldt->ldt_base;
	sldt.ssd_limit = max_ldt_segment *
	    sizeof(struct user_segment_descriptor) - 1;
	sldt.ssd_type = SDT_SYSLDT;
	sldt.ssd_dpl = SEL_KPL;
	sldt.ssd_p = 1;
	sldt.ssd_long = 0;
	sldt.ssd_def32 = 0;
	sldt.ssd_gran = 0;
	mtx_lock(&dt_lock);
	pldt = mdp->md_ldt;
	if (pldt != NULL && !force) {
		kmem_free(kernel_arena, (vm_offset_t)new_ldt->ldt_base,
		    max_ldt_segment * sizeof(struct user_segment_descriptor));
		free(new_ldt, M_SUBPROC);
		return (pldt);
	}

	if (pldt != NULL) {
		bcopy(pldt->ldt_base, new_ldt->ldt_base, max_ldt_segment *
		    sizeof(struct user_segment_descriptor));
		user_ldt_derefl(pldt);
	}
	critical_enter();
	ssdtosyssd(&sldt, &p->p_md.md_ldt_sd);
	atomic_thread_fence_rel();
	mdp->md_ldt = new_ldt;
	critical_exit();
	smp_rendezvous(NULL, (void (*)(void *))set_user_ldt_rv, NULL,
	    p->p_vmspace);

	return (mdp->md_ldt);
}

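/*
 * Detach the LDT from td's process and drop a reference to it.  If td
 * is the current thread, a null LDT selector is loaded right away so
 * the CPU no longer references the departing table.
 */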
void
user_ldt_free(struct thread *td)
{
	struct proc *p = td->td_proc;
	struct mdproc *mdp = &p->p_md;
	struct proc_ldt *pldt;

	mtx_lock(&dt_lock);
	if ((pldt = mdp->md_ldt) == NULL) {
		mtx_unlock(&dt_lock);
		return;
	}

	critical_enter();
	mdp->md_ldt = NULL;
	atomic_thread_fence_rel();
	bzero(&mdp->md_ldt_sd, sizeof(mdp->md_ldt_sd));
	if (td == curthread)
		lldt(GSEL(GNULL_SEL, SEL_KPL));
	critical_exit();
	user_ldt_deref(pldt);
}

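/*
 * Reference counting for proc_ldt: the 'l' variant expects dt_lock to be
 * held by the caller, while user_ldt_deref() drops the lock itself.  The
 * backing kernel memory is released when the last reference goes away.
 */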
static void
user_ldt_derefl(struct proc_ldt *pldt)
{

	if (--pldt->ldt_refcnt == 0) {
		kmem_free(kernel_arena, (vm_offset_t)pldt->ldt_base,
		    max_ldt_segment * sizeof(struct user_segment_descriptor));
		free(pldt, M_SUBPROC);
	}
}

void
user_ldt_deref(struct proc_ldt *pldt)
{

	mtx_assert(&dt_lock, MA_OWNED);
	user_ldt_derefl(pldt);
	mtx_unlock(&dt_lock);
}

/*
 * Note for the authors of compat layers (linux, etc): copyout() in
 * the function below is not a problem since it presents data in an
 * arch-specific format (i.e. i386-specific in this case), not in an
 * OS-specific one.
 */
int
amd64_get_ldt(struct thread *td, struct i386_ldt_args *uap)
{
	struct proc_ldt *pldt;
	struct user_segment_descriptor *lp;
	uint64_t *data;
	u_int i, num;
	int error;

#ifdef	DEBUG
	printf("amd64_get_ldt: start=%u num=%u descs=%p\n",
	    uap->start, uap->num, (void *)uap->descs);
#endif

	pldt = td->td_proc->p_md.md_ldt;
	if (pldt == NULL || uap->start >= max_ldt_segment || uap->num == 0) {
		td->td_retval[0] = 0;
		return (0);
	}
	num = min(uap->num, max_ldt_segment - uap->start);
	lp = &((struct user_segment_descriptor *)(pldt->ldt_base))[uap->start];
	data = malloc(num * sizeof(struct user_segment_descriptor), M_TEMP,
	    M_WAITOK);
	mtx_lock(&dt_lock);
	for (i = 0; i < num; i++)
		data[i] = ((volatile uint64_t *)lp)[i];
	mtx_unlock(&dt_lock);
	error = copyout(data, uap->descs, num *
	    sizeof(struct user_segment_descriptor));
	free(data, M_TEMP);
	if (error == 0)
		td->td_retval[0] = num;
	return (error);
}

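/*
 * Install user-supplied LDT descriptors, after validating that each one
 * is a memory segment type and that only ring-3 descriptors are marked
 * present.  A NULL 'descs' pointer means the named range is to be
 * cleared instead, and LDT_AUTO_ALLOC requests that a free slot be found
 * for a single descriptor.
 */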
int
amd64_set_ldt(struct thread *td, struct i386_ldt_args *uap,
    struct user_segment_descriptor *descs)
{
	struct mdproc *mdp;
	struct proc_ldt *pldt;
	struct user_segment_descriptor *dp;
	struct proc *p;
	u_int largest_ld, i;
	int error;

#ifdef	DEBUG
	printf("amd64_set_ldt: start=%u num=%u descs=%p\n",
	    uap->start, uap->num, (void *)uap->descs);
#endif
	mdp = &td->td_proc->p_md;
	error = 0;

	set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
	p = td->td_proc;
	if (descs == NULL) {
		/* Free descriptors */
		if (uap->start == 0 && uap->num == 0)
			uap->num = max_ldt_segment;
		if (uap->num == 0)
			return (EINVAL);
		if ((pldt = mdp->md_ldt) == NULL ||
		    uap->start >= max_ldt_segment)
			return (0);
		largest_ld = uap->start + uap->num;
		if (largest_ld > max_ldt_segment)
			largest_ld = max_ldt_segment;
		if (largest_ld < uap->start)
			return (EINVAL);
		mtx_lock(&dt_lock);
		for (i = uap->start; i < largest_ld; i++)
			((volatile uint64_t *)(pldt->ldt_base))[i] = 0;
		mtx_unlock(&dt_lock);
		return (0);
	}

	if (!(uap->start == LDT_AUTO_ALLOC && uap->num == 1)) {
		/* verify range of descriptors to modify */
		largest_ld = uap->start + uap->num;
		if (uap->start >= max_ldt_segment ||
		    largest_ld > max_ldt_segment ||
		    largest_ld < uap->start)
			return (EINVAL);
	}

	/* Check descriptors for access violations */
	for (i = 0; i < uap->num; i++) {
		dp = &descs[i];

		switch (dp->sd_type) {
		case SDT_SYSNULL:	/* system null */
			dp->sd_p = 0;
			break;
		case SDT_SYS286TSS:
		case SDT_SYSLDT:
		case SDT_SYS286BSY:
		case SDT_SYS286CGT:
		case SDT_SYSTASKGT:
		case SDT_SYS286IGT:
		case SDT_SYS286TGT:
		case SDT_SYSNULL2:
		case SDT_SYSTSS:
		case SDT_SYSNULL3:
		case SDT_SYSBSY:
		case SDT_SYSCGT:
		case SDT_SYSNULL4:
		case SDT_SYSIGT:
		case SDT_SYSTGT:
			return (EACCES);

		/* memory segment types */
		case SDT_MEMEC:   /* memory execute only conforming */
		case SDT_MEMEAC:  /* memory execute only accessed conforming */
		case SDT_MEMERC:  /* memory execute read conforming */
		case SDT_MEMERAC: /* memory execute read accessed conforming */
			/* Must be "present" if executable and conforming. */
			if (dp->sd_p == 0)
				return (EACCES);
			break;
		case SDT_MEMRO:   /* memory read only */
		case SDT_MEMROA:  /* memory read only accessed */
		case SDT_MEMRW:   /* memory read write */
		case SDT_MEMRWA:  /* memory read write accessed */
		case SDT_MEMROD:  /* memory read only expand dwn limit */
		case SDT_MEMRODA: /* memory read only expand dwn lim accessed */
		case SDT_MEMRWD:  /* memory read write expand dwn limit */
		case SDT_MEMRWDA: /* memory read write expand dwn lim accessed */
		case SDT_MEME:    /* memory execute only */
		case SDT_MEMEA:   /* memory execute only accessed */
		case SDT_MEMER:   /* memory execute read */
		case SDT_MEMERA:  /* memory execute read accessed */
			break;
		default:
			return (EINVAL);
		}

		/* Only user (ring-3) descriptors may be present. */
		if ((dp->sd_p != 0) && (dp->sd_dpl != SEL_UPL))
			return (EACCES);
	}

	if (uap->start == LDT_AUTO_ALLOC && uap->num == 1) {
		/* Allocate a free slot */
		mtx_lock(&dt_lock);
		pldt = user_ldt_alloc(p, 0);
		if (pldt == NULL) {
			mtx_unlock(&dt_lock);
			return (ENOMEM);
		}

		/*
		 * Start scanning a bit up to leave room for NVidia and
		 * Wine, which still use the "Blat" method of allocation.
		 */
		i = 16;
		dp = &((struct user_segment_descriptor *)(pldt->ldt_base))[i];
		for (; i < max_ldt_segment; ++i, ++dp) {
			if (dp->sd_type == SDT_SYSNULL)
				break;
		}
		if (i >= max_ldt_segment) {
			mtx_unlock(&dt_lock);
			return (ENOSPC);
		}
		uap->start = i;
		error = amd64_set_ldt_data(td, i, 1, descs);
		mtx_unlock(&dt_lock);
	} else {
		largest_ld = uap->start + uap->num;
		if (largest_ld > max_ldt_segment)
			return (EINVAL);
		mtx_lock(&dt_lock);
		if (user_ldt_alloc(p, 0) != NULL) {
			error = amd64_set_ldt_data(td, uap->start, uap->num,
			    descs);
		}
		mtx_unlock(&dt_lock);
	}
	if (error == 0)
		td->td_retval[0] = uap->start;
	return (error);
}

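/*
 * Copy descriptors into the process LDT.  Each descriptor is written as
 * a single 64-bit store so a concurrently running thread never sees a
 * half-updated entry.
 */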
int
amd64_set_ldt_data(struct thread *td, int start, int num,
    struct user_segment_descriptor *descs)
{
	struct mdproc *mdp;
	struct proc_ldt *pldt;
	volatile uint64_t *dst, *src;
	int i;

	mtx_assert(&dt_lock, MA_OWNED);

	mdp = &td->td_proc->p_md;
	pldt = mdp->md_ldt;
	dst = (volatile uint64_t *)(pldt->ldt_base);
	src = (volatile uint64_t *)descs;
	for (i = 0; i < num; i++)
		dst[start + i] = src[i];
	return (0);
}