xref: /freebsd/sys/amd64/amd64/sys_machdep.c (revision 780fb4a2)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)sys_machdep.c	5.5 (Berkeley) 1/19/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysproto.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>		/* for kernel_map */
#include <vm/vm_extern.h>

#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/specialreg.h>
#include <machine/sysarch.h>
#include <machine/tss.h>
#include <machine/vmparam.h>

#include <security/audit/audit.h>

static void user_ldt_deref(struct proc_ldt *pldt);
static void user_ldt_derefl(struct proc_ldt *pldt);

#define	MAX_LD		8192
int max_ldt_segment = 512;
SYSCTL_INT(_machdep, OID_AUTO, max_ldt_segment, CTLFLAG_RDTUN,
    &max_ldt_segment, 0,
    "Maximum number of LDT segments allowed in a single address space");

static void
max_ldt_segment_init(void *arg __unused)
{

	if (max_ldt_segment <= 0)
		max_ldt_segment = 1;
	if (max_ldt_segment > MAX_LD)
		max_ldt_segment = MAX_LD;
}
SYSINIT(maxldt, SI_SUB_VM_CONF, SI_ORDER_ANY, max_ldt_segment_init, NULL);

#ifndef _SYS_SYSPROTO_H_
struct sysarch_args {
	int op;
	char *parms;
};
#endif

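/*
 * Handle the I386_GET_LDT and I386_SET_LDT operations of sysarch(2).
 * uap_space says whether uap->parms points at user or kernel memory,
 * so the i386_ldt_args structure is only copied in when needed.
 */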
int
sysarch_ldt(struct thread *td, struct sysarch_args *uap, int uap_space)
{
	struct i386_ldt_args *largs, la;
	struct user_segment_descriptor *lp;
	int error = 0;

	/*
	 * XXXKIB check that the BSM generation code knows to encode
	 * the op argument.
	 */
	AUDIT_ARG_CMD(uap->op);
	if (uap_space == UIO_USERSPACE) {
		error = copyin(uap->parms, &la, sizeof(struct i386_ldt_args));
		if (error != 0)
			return (error);
		largs = &la;
	} else
		largs = (struct i386_ldt_args *)uap->parms;

	switch (uap->op) {
	case I386_GET_LDT:
		error = amd64_get_ldt(td, largs);
		break;
	case I386_SET_LDT:
		if (largs->descs != NULL && largs->num > max_ldt_segment)
			return (EINVAL);
		set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
		if (largs->descs != NULL) {
			lp = malloc(largs->num * sizeof(struct
			    user_segment_descriptor), M_TEMP, M_WAITOK);
			error = copyin(largs->descs, lp, largs->num *
			    sizeof(struct user_segment_descriptor));
			if (error == 0)
				error = amd64_set_ldt(td, largs, lp);
			free(lp, M_TEMP);
		} else {
			error = amd64_set_ldt(td, largs, NULL);
		}
		break;
	}
	return (error);
}

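/*
 * Write a 32-bit %gs base into the per-CPU GDT descriptor so that the
 * new base takes effect on the CPU the thread is currently running on.
 */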
void
update_gdt_gsbase(struct thread *td, uint32_t base)
{
	struct user_segment_descriptor *sd;

	if (td != curthread)
		return;
	set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
	critical_enter();
	sd = PCPU_GET(gs32p);
	sd->sd_lobase = base & 0xffffff;
	sd->sd_hibase = (base >> 24) & 0xff;
	critical_exit();
}

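/*
 * Same as update_gdt_gsbase(), but for the 32-bit %fs descriptor.
 */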
void
update_gdt_fsbase(struct thread *td, uint32_t base)
{
	struct user_segment_descriptor *sd;

	if (td != curthread)
		return;
	set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
	critical_enter();
	sd = PCPU_GET(fs32p);
	sd->sd_lobase = base & 0xffffff;
	sd->sd_hibase = (base >> 24) & 0xff;
	critical_exit();
}

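/*
 * sysarch(2): dispatch the machine-dependent requests.  LDT operations
 * are handed off to sysarch_ldt(); the remaining operations copy their
 * argument structures in, act on the current thread's PCB or TSS, and
 * copy results back out where needed.
 */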
int
sysarch(struct thread *td, struct sysarch_args *uap)
{
	int error = 0;
	struct pcb *pcb = curthread->td_pcb;
	uint32_t i386base;
	uint64_t a64base;
	struct i386_ioperm_args iargs;
	struct i386_get_xfpustate i386xfpu;
	struct amd64_get_xfpustate a64xfpu;

#ifdef CAPABILITY_MODE
	/*
	 * When adding new operations, add a new case statement here to
	 * explicitly indicate whether or not the operation is safe to
	 * perform in capability mode.
	 */
	if (IN_CAPABILITY_MODE(td)) {
		switch (uap->op) {
		case I386_GET_LDT:
		case I386_SET_LDT:
		case I386_GET_IOPERM:
		case I386_GET_FSBASE:
		case I386_SET_FSBASE:
		case I386_GET_GSBASE:
		case I386_SET_GSBASE:
		case I386_GET_XFPUSTATE:
		case AMD64_GET_FSBASE:
		case AMD64_SET_FSBASE:
		case AMD64_GET_GSBASE:
		case AMD64_SET_GSBASE:
		case AMD64_GET_XFPUSTATE:
			break;

		case I386_SET_IOPERM:
		default:
#ifdef KTRACE
			if (KTRPOINT(td, KTR_CAPFAIL))
				ktrcapfail(CAPFAIL_SYSCALL, NULL, NULL);
#endif
			return (ECAPMODE);
		}
	}
#endif

	if (uap->op == I386_GET_LDT || uap->op == I386_SET_LDT)
		return (sysarch_ldt(td, uap, UIO_USERSPACE));
	/*
	 * XXXKIB check that the BSM generation code knows to encode
	 * the op argument.
	 */
	AUDIT_ARG_CMD(uap->op);
	switch (uap->op) {
	case I386_GET_IOPERM:
	case I386_SET_IOPERM:
		if ((error = copyin(uap->parms, &iargs,
		    sizeof(struct i386_ioperm_args))) != 0)
			return (error);
		break;
	case I386_GET_XFPUSTATE:
		if ((error = copyin(uap->parms, &i386xfpu,
		    sizeof(struct i386_get_xfpustate))) != 0)
			return (error);
		a64xfpu.addr = (void *)(uintptr_t)i386xfpu.addr;
		a64xfpu.len = i386xfpu.len;
		break;
	case AMD64_GET_XFPUSTATE:
		if ((error = copyin(uap->parms, &a64xfpu,
		    sizeof(struct amd64_get_xfpustate))) != 0)
			return (error);
		break;
	default:
		break;
	}

	switch (uap->op) {
	case I386_GET_IOPERM:
		error = amd64_get_ioperm(td, &iargs);
		if (error == 0)
			error = copyout(&iargs, uap->parms,
			    sizeof(struct i386_ioperm_args));
		break;
	case I386_SET_IOPERM:
		error = amd64_set_ioperm(td, &iargs);
		break;
	case I386_GET_FSBASE:
		update_pcb_bases(pcb);
		i386base = pcb->pcb_fsbase;
		error = copyout(&i386base, uap->parms, sizeof(i386base));
		break;
	case I386_SET_FSBASE:
		error = copyin(uap->parms, &i386base, sizeof(i386base));
		if (!error) {
			set_pcb_flags(pcb, PCB_FULL_IRET);
			pcb->pcb_fsbase = i386base;
			td->td_frame->tf_fs = _ufssel;
			update_gdt_fsbase(td, i386base);
		}
		break;
	case I386_GET_GSBASE:
		update_pcb_bases(pcb);
		i386base = pcb->pcb_gsbase;
		error = copyout(&i386base, uap->parms, sizeof(i386base));
		break;
	case I386_SET_GSBASE:
		error = copyin(uap->parms, &i386base, sizeof(i386base));
		if (!error) {
			set_pcb_flags(pcb, PCB_FULL_IRET);
			pcb->pcb_gsbase = i386base;
			td->td_frame->tf_gs = _ugssel;
			update_gdt_gsbase(td, i386base);
		}
		break;
	case AMD64_GET_FSBASE:
		update_pcb_bases(pcb);
		error = copyout(&pcb->pcb_fsbase, uap->parms,
		    sizeof(pcb->pcb_fsbase));
		break;

	case AMD64_SET_FSBASE:
		error = copyin(uap->parms, &a64base, sizeof(a64base));
		if (!error) {
			if (a64base < VM_MAXUSER_ADDRESS) {
				set_pcb_flags(pcb, PCB_FULL_IRET);
				pcb->pcb_fsbase = a64base;
				td->td_frame->tf_fs = _ufssel;
			} else
				error = EINVAL;
		}
		break;

	case AMD64_GET_GSBASE:
		update_pcb_bases(pcb);
		error = copyout(&pcb->pcb_gsbase, uap->parms,
		    sizeof(pcb->pcb_gsbase));
		break;

	case AMD64_SET_GSBASE:
		error = copyin(uap->parms, &a64base, sizeof(a64base));
		if (!error) {
			if (a64base < VM_MAXUSER_ADDRESS) {
				set_pcb_flags(pcb, PCB_FULL_IRET);
				pcb->pcb_gsbase = a64base;
				td->td_frame->tf_gs = _ugssel;
			} else
				error = EINVAL;
		}
		break;

	case I386_GET_XFPUSTATE:
	case AMD64_GET_XFPUSTATE:
		if (a64xfpu.len > cpu_max_ext_state_size -
		    sizeof(struct savefpu))
			return (EINVAL);
		fpugetregs(td);
		error = copyout((char *)(get_pcb_user_save_td(td) + 1),
		    a64xfpu.addr, a64xfpu.len);
		break;

	default:
		error = EINVAL;
		break;
	}
	return (error);
}

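/*
 * Grant or revoke user access to a range of I/O ports by editing the
 * I/O permission bitmap in a per-process TSS, which is allocated and
 * installed on first use.  Requires PRIV_IO and securelevel <= 0.
 */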
int
amd64_set_ioperm(struct thread *td, struct i386_ioperm_args *uap)
{
	char *iomap;
	struct amd64tss *tssp;
	struct system_segment_descriptor *tss_sd;
	struct pcb *pcb;
	u_int i;
	int error;

	if ((error = priv_check(td, PRIV_IO)) != 0)
		return (error);
	if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
		return (error);
	if (uap->start > uap->start + uap->length ||
	    uap->start + uap->length > IOPAGES * PAGE_SIZE * NBBY)
		return (EINVAL);

	/*
	 * XXX
	 * While this is restricted to root, we should probably figure out
	 * whether any other driver is using this i/o address, so as not to
	 * cause confusion.  This probably requires a global 'usage registry'.
	 */
	pcb = td->td_pcb;
	if (pcb->pcb_tssp == NULL) {
		tssp = (struct amd64tss *)kmem_malloc(kernel_arena,
		    ctob(IOPAGES + 1), M_WAITOK);
		pmap_pti_add_kva((vm_offset_t)tssp, (vm_offset_t)tssp +
		    ctob(IOPAGES + 1), false);
		iomap = (char *)&tssp[1];
		memset(iomap, 0xff, IOPERM_BITMAP_SIZE);
		critical_enter();
		/* Takes care of tss_rsp0. */
		memcpy(tssp, &common_tss[PCPU_GET(cpuid)],
		    sizeof(struct amd64tss));
		tssp->tss_iobase = sizeof(*tssp);
		pcb->pcb_tssp = tssp;
		tss_sd = PCPU_GET(tss);
		tss_sd->sd_lobase = (u_long)tssp & 0xffffff;
		tss_sd->sd_hibase = ((u_long)tssp >> 24) & 0xfffffffffful;
		tss_sd->sd_type = SDT_SYSTSS;
		ltr(GSEL(GPROC0_SEL, SEL_KPL));
		PCPU_SET(tssp, tssp);
		critical_exit();
	} else
		iomap = (char *)&pcb->pcb_tssp[1];
	for (i = uap->start; i < uap->start + uap->length; i++) {
		if (uap->enable)
			iomap[i >> 3] &= ~(1 << (i & 7));
		else
			iomap[i >> 3] |= (1 << (i & 7));
	}
	return (error);
}

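/*
 * Report the access state of the I/O port range starting at uap->start:
 * uap->enable reflects the bitmap and uap->length is the number of
 * consecutive ports sharing that state.
 */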
int
amd64_get_ioperm(struct thread *td, struct i386_ioperm_args *uap)
{
	int i, state;
	char *iomap;

	if (uap->start >= IOPAGES * PAGE_SIZE * NBBY)
		return (EINVAL);
	if (td->td_pcb->pcb_tssp == NULL) {
		uap->length = 0;
		goto done;
	}

	iomap = (char *)&td->td_pcb->pcb_tssp[1];

	i = uap->start;
	state = (iomap[i >> 3] >> (i & 7)) & 1;
	uap->enable = !state;
	uap->length = 1;

	for (i = uap->start + 1; i < IOPAGES * PAGE_SIZE * NBBY; i++) {
		if (state != ((iomap[i >> 3] >> (i & 7)) & 1))
			break;
		uap->length++;
	}

done:
	return (0);
}

/*
 * Update the GDT entry pointing to the LDT to point to the LDT of the
 * current process.
 */
static void
set_user_ldt(struct mdproc *mdp)
{

	*PCPU_GET(ldt) = mdp->md_ldt_sd;
	lldt(GSEL(GUSERLDT_SEL, SEL_KPL));
}

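/*
 * smp_rendezvous() callback: load the new LDT on each CPU that is
 * currently running a thread from the target vmspace.
 */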
static void
set_user_ldt_rv(struct vmspace *vmsp)
{
	struct thread *td;

	td = curthread;
	if (vmsp != td->td_proc->p_vmspace)
		return;

	set_user_ldt(&td->td_proc->p_md);
}

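/*
 * Return the process LDT, allocating a new one unless an LDT already
 * exists and 'force' is zero.  Called with dt_lock held; the lock is
 * dropped around the allocation and re-taken before the new LDT is
 * installed and propagated to all CPUs running this process.
 */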
struct proc_ldt *
user_ldt_alloc(struct proc *p, int force)
{
	struct proc_ldt *pldt, *new_ldt;
	struct mdproc *mdp;
	struct soft_segment_descriptor sldt;
	vm_offset_t sva;
	vm_size_t sz;

	mtx_assert(&dt_lock, MA_OWNED);
	mdp = &p->p_md;
	if (!force && mdp->md_ldt != NULL)
		return (mdp->md_ldt);
	mtx_unlock(&dt_lock);
	new_ldt = malloc(sizeof(struct proc_ldt), M_SUBPROC, M_WAITOK);
	sz = max_ldt_segment * sizeof(struct user_segment_descriptor);
	sva = kmem_malloc(kernel_arena, sz, M_WAITOK | M_ZERO);
	new_ldt->ldt_base = (caddr_t)sva;
	pmap_pti_add_kva(sva, sva + sz, false);
	new_ldt->ldt_refcnt = 1;
	sldt.ssd_base = sva;
	sldt.ssd_limit = sz - 1;
	sldt.ssd_type = SDT_SYSLDT;
	sldt.ssd_dpl = SEL_KPL;
	sldt.ssd_p = 1;
	sldt.ssd_long = 0;
	sldt.ssd_def32 = 0;
	sldt.ssd_gran = 0;
	mtx_lock(&dt_lock);
	pldt = mdp->md_ldt;
	if (pldt != NULL && !force) {
		pmap_pti_remove_kva(sva, sva + sz);
		kmem_free(kernel_arena, sva, sz);
		free(new_ldt, M_SUBPROC);
		return (pldt);
	}

	if (pldt != NULL) {
		bcopy(pldt->ldt_base, new_ldt->ldt_base, max_ldt_segment *
		    sizeof(struct user_segment_descriptor));
		user_ldt_derefl(pldt);
	}
	critical_enter();
	ssdtosyssd(&sldt, &p->p_md.md_ldt_sd);
	atomic_thread_fence_rel();
	mdp->md_ldt = new_ldt;
	critical_exit();
	smp_rendezvous(NULL, (void (*)(void *))set_user_ldt_rv, NULL,
	    p->p_vmspace);

	return (mdp->md_ldt);
}

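/*
 * Detach the LDT from the process and drop its reference.  If td is
 * the current thread, the null LDT selector is loaded so the CPU no
 * longer references the table being released.
 */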
void
user_ldt_free(struct thread *td)
{
	struct proc *p = td->td_proc;
	struct mdproc *mdp = &p->p_md;
	struct proc_ldt *pldt;

	mtx_lock(&dt_lock);
	if ((pldt = mdp->md_ldt) == NULL) {
		mtx_unlock(&dt_lock);
		return;
	}

	critical_enter();
	mdp->md_ldt = NULL;
	atomic_thread_fence_rel();
	bzero(&mdp->md_ldt_sd, sizeof(mdp->md_ldt_sd));
	if (td == curthread)
		lldt(GSEL(GNULL_SEL, SEL_KPL));
	critical_exit();
	user_ldt_deref(pldt);
}

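/*
 * Drop a reference on the LDT; on the last reference release its KVA
 * (including the PTI alias) and free the proc_ldt structure.  The
 * caller holds dt_lock.
 */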
static void
user_ldt_derefl(struct proc_ldt *pldt)
{
	vm_offset_t sva;
	vm_size_t sz;

	if (--pldt->ldt_refcnt == 0) {
		sva = (vm_offset_t)pldt->ldt_base;
		sz = max_ldt_segment * sizeof(struct user_segment_descriptor);
		pmap_pti_remove_kva(sva, sva + sz);
		kmem_free(kernel_arena, sva, sz);
		free(pldt, M_SUBPROC);
	}
}

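/*
 * As above, but also releases dt_lock, which must be held on entry.
 */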
static void
user_ldt_deref(struct proc_ldt *pldt)
{

	mtx_assert(&dt_lock, MA_OWNED);
	user_ldt_derefl(pldt);
	mtx_unlock(&dt_lock);
}

/*
 * Note for the authors of compat layers (Linux, etc.): copyout() in
 * the function below is not a problem since it presents data in an
 * arch-specific format (i.e., i386-specific in this case), not in an
 * OS-specific one.
 */
int
amd64_get_ldt(struct thread *td, struct i386_ldt_args *uap)
{
	struct proc_ldt *pldt;
	struct user_segment_descriptor *lp;
	uint64_t *data;
	u_int i, num;
	int error;

#ifdef	DEBUG
	printf("amd64_get_ldt: start=%u num=%u descs=%p\n",
	    uap->start, uap->num, (void *)uap->descs);
#endif

	pldt = td->td_proc->p_md.md_ldt;
	if (pldt == NULL || uap->start >= max_ldt_segment || uap->num == 0) {
		td->td_retval[0] = 0;
		return (0);
	}
	num = min(uap->num, max_ldt_segment - uap->start);
	lp = &((struct user_segment_descriptor *)(pldt->ldt_base))[uap->start];
	data = malloc(num * sizeof(struct user_segment_descriptor), M_TEMP,
	    M_WAITOK);
	mtx_lock(&dt_lock);
	for (i = 0; i < num; i++)
		data[i] = ((volatile uint64_t *)lp)[i];
	mtx_unlock(&dt_lock);
	error = copyout(data, uap->descs, num *
	    sizeof(struct user_segment_descriptor));
	free(data, M_TEMP);
	if (error == 0)
		td->td_retval[0] = num;
	return (error);
}

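/*
 * Install, clear, or auto-allocate LDT entries for the calling process.
 * With descs == NULL the requested range is zeroed; with uap->start ==
 * LDT_AUTO_ALLOC and uap->num == 1 the first free slot is used.  Every
 * descriptor is validated so that only user (ring-3) memory segments
 * can be made present.
 */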
int
amd64_set_ldt(struct thread *td, struct i386_ldt_args *uap,
    struct user_segment_descriptor *descs)
{
	struct mdproc *mdp;
	struct proc_ldt *pldt;
	struct user_segment_descriptor *dp;
	struct proc *p;
	u_int largest_ld, i;
	int error;

#ifdef	DEBUG
	printf("amd64_set_ldt: start=%u num=%u descs=%p\n",
	    uap->start, uap->num, (void *)uap->descs);
#endif
	mdp = &td->td_proc->p_md;
	error = 0;

	set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
	p = td->td_proc;
	if (descs == NULL) {
		/* Free descriptors */
		if (uap->start == 0 && uap->num == 0)
			uap->num = max_ldt_segment;
		if (uap->num == 0)
			return (EINVAL);
		if ((pldt = mdp->md_ldt) == NULL ||
		    uap->start >= max_ldt_segment)
			return (0);
		largest_ld = uap->start + uap->num;
		if (largest_ld > max_ldt_segment)
			largest_ld = max_ldt_segment;
		if (largest_ld < uap->start)
			return (EINVAL);
		mtx_lock(&dt_lock);
		for (i = uap->start; i < largest_ld; i++)
			((volatile uint64_t *)(pldt->ldt_base))[i] = 0;
		mtx_unlock(&dt_lock);
		return (0);
	}

	if (!(uap->start == LDT_AUTO_ALLOC && uap->num == 1)) {
		/* verify range of descriptors to modify */
		largest_ld = uap->start + uap->num;
		if (uap->start >= max_ldt_segment ||
		    largest_ld > max_ldt_segment ||
		    largest_ld < uap->start)
			return (EINVAL);
	}

	/* Check descriptors for access violations */
	for (i = 0; i < uap->num; i++) {
		dp = &descs[i];

		switch (dp->sd_type) {
		case SDT_SYSNULL:	/* system null */
			dp->sd_p = 0;
			break;
		case SDT_SYS286TSS:
		case SDT_SYSLDT:
		case SDT_SYS286BSY:
		case SDT_SYS286CGT:
		case SDT_SYSTASKGT:
		case SDT_SYS286IGT:
		case SDT_SYS286TGT:
		case SDT_SYSNULL2:
		case SDT_SYSTSS:
		case SDT_SYSNULL3:
		case SDT_SYSBSY:
		case SDT_SYSCGT:
		case SDT_SYSNULL4:
		case SDT_SYSIGT:
		case SDT_SYSTGT:
			return (EACCES);

		/* memory segment types */
		case SDT_MEMEC:   /* memory execute only conforming */
		case SDT_MEMEAC:  /* memory execute only accessed conforming */
		case SDT_MEMERC:  /* memory execute read conforming */
		case SDT_MEMERAC: /* memory execute read accessed conforming */
			 /* Must be "present" if executable and conforming. */
			if (dp->sd_p == 0)
				return (EACCES);
			break;
		case SDT_MEMRO:   /* memory read only */
		case SDT_MEMROA:  /* memory read only accessed */
		case SDT_MEMRW:   /* memory read write */
		case SDT_MEMRWA:  /* memory read write accessed */
		case SDT_MEMROD:  /* memory read only expand dwn limit */
		case SDT_MEMRODA: /* memory read only expand dwn lim accessed */
		case SDT_MEMRWD:  /* memory read write expand dwn limit */
		case SDT_MEMRWDA: /* memory read write expand dwn lim accessed */
		case SDT_MEME:    /* memory execute only */
		case SDT_MEMEA:   /* memory execute only accessed */
		case SDT_MEMER:   /* memory execute read */
		case SDT_MEMERA:  /* memory execute read accessed */
			break;
		default:
			return (EINVAL);
		}

		/* Only user (ring-3) descriptors may be present. */
		if ((dp->sd_p != 0) && (dp->sd_dpl != SEL_UPL))
			return (EACCES);
	}

	if (uap->start == LDT_AUTO_ALLOC && uap->num == 1) {
		/* Allocate a free slot */
		mtx_lock(&dt_lock);
		pldt = user_ldt_alloc(p, 0);
		if (pldt == NULL) {
			mtx_unlock(&dt_lock);
			return (ENOMEM);
		}

		/*
		 * Start scanning a bit up to leave room for NVidia and
		 * Wine, which still use the "Blat" method of allocation.
		 */
		i = 16;
		dp = &((struct user_segment_descriptor *)(pldt->ldt_base))[i];
		for (; i < max_ldt_segment; ++i, ++dp) {
			if (dp->sd_type == SDT_SYSNULL)
				break;
		}
		if (i >= max_ldt_segment) {
			mtx_unlock(&dt_lock);
			return (ENOSPC);
		}
		uap->start = i;
		error = amd64_set_ldt_data(td, i, 1, descs);
		mtx_unlock(&dt_lock);
	} else {
		largest_ld = uap->start + uap->num;
		if (largest_ld > max_ldt_segment)
			return (EINVAL);
		mtx_lock(&dt_lock);
		if (user_ldt_alloc(p, 0) != NULL) {
			error = amd64_set_ldt_data(td, uap->start, uap->num,
			    descs);
		}
		mtx_unlock(&dt_lock);
	}
	if (error == 0)
		td->td_retval[0] = uap->start;
	return (error);
}

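/*
 * Copy validated descriptors into the process LDT as whole 64-bit
 * words.  Called with dt_lock held.
 */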
int
amd64_set_ldt_data(struct thread *td, int start, int num,
    struct user_segment_descriptor *descs)
{
	struct mdproc *mdp;
	struct proc_ldt *pldt;
	volatile uint64_t *dst, *src;
	int i;

	mtx_assert(&dt_lock, MA_OWNED);

	mdp = &td->td_proc->p_md;
	pldt = mdp->md_ldt;
	dst = (volatile uint64_t *)(pldt->ldt_base);
	src = (volatile uint64_t *)descs;
	for (i = 0; i < num; i++)
		dst[start + i] = src[i];
	return (0);
}