xref: /freebsd/sys/i386/i386/sys_machdep.c (revision d9c9c81c)
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)sys_machdep.c	5.5 (Berkeley) 1/19/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysproto.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/proc.h>
#include <machine/sysarch.h>

#include <security/audit/audit.h>

#include <vm/vm_kern.h>		/* for kernel_map */

#define MAX_LD 8192
#define LD_PER_PAGE 512
#define	NEW_MAX_LD(num)  rounddown2(num + LD_PER_PAGE, LD_PER_PAGE)
#define SIZE_FROM_LARGEST_LD(num) (NEW_MAX_LD(num) << 3)
#define	NULL_LDT_BASE	((caddr_t)NULL)

#ifdef SMP
static void set_user_ldt_rv(struct vmspace *vmsp);
#endif
static int i386_set_ldt_data(struct thread *, int start, int num,
	union descriptor *descs);
static int i386_ldt_grow(struct thread *td, int len);

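/*
 * Construct a user-mode (ring 3), flat 4GB read/write data segment
 * descriptor with the given linear base address.  Used below to implement
 * the %fs/%gs base get/set operations.
 */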
7706d058bdSKonstantin Belousov void
7806d058bdSKonstantin Belousov fill_based_sd(struct segment_descriptor *sdp, uint32_t base)
7906d058bdSKonstantin Belousov {
8006d058bdSKonstantin Belousov 
8106d058bdSKonstantin Belousov 	sdp->sd_lobase = base & 0xffffff;
8206d058bdSKonstantin Belousov 	sdp->sd_hibase = (base >> 24) & 0xff;
8306d058bdSKonstantin Belousov 	sdp->sd_lolimit = 0xffff;	/* 4GB limit, wraps around */
8406d058bdSKonstantin Belousov 	sdp->sd_hilimit = 0xf;
8506d058bdSKonstantin Belousov 	sdp->sd_type = SDT_MEMRWA;
8606d058bdSKonstantin Belousov 	sdp->sd_dpl = SEL_UPL;
8706d058bdSKonstantin Belousov 	sdp->sd_p = 1;
8806d058bdSKonstantin Belousov 	sdp->sd_xx = 0;
8906d058bdSKonstantin Belousov 	sdp->sd_def32 = 1;
9006d058bdSKonstantin Belousov 	sdp->sd_gran = 1;
9106d058bdSKonstantin Belousov }
9206d058bdSKonstantin Belousov 
932f1ba63bSBruce Evans #ifndef _SYS_SYSPROTO_H_
94812a11a2SDavid Greenman struct sysarch_args {
95812a11a2SDavid Greenman 	int op;
96812a11a2SDavid Greenman 	char *parms;
9701ae5b20SDavid Greenman };
982f1ba63bSBruce Evans #endif
9901ae5b20SDavid Greenman 
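/*
 * sysarch(2): machine-dependent operation multiplexer.  The op argument
 * selects the operation and parms points at an op-specific argument
 * structure that is copied in and/or out as needed.  For example (purely
 * illustrative), userland would set the %fs segment base with something
 * like sysarch(I386_SET_FSBASE, &base); see <machine/sysarch.h> for the
 * argument structures used by the other operations.
 */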
int
sysarch(td, uap)
	struct thread *td;
	register struct sysarch_args *uap;
{
	int error;
	union descriptor *lp;
	union {
		struct i386_ldt_args largs;
		struct i386_ioperm_args iargs;
		struct i386_get_xfpustate xfpu;
	} kargs;
	uint32_t base;
	struct segment_descriptor sd, *sdp;

	AUDIT_ARG_CMD(uap->op);

#ifdef CAPABILITY_MODE
	/*
	 * When adding new operations, add a new case statement here to
	 * explicitly indicate whether or not the operation is safe to
	 * perform in capability mode.
	 */
	if (IN_CAPABILITY_MODE(td)) {
		switch (uap->op) {
		case I386_GET_LDT:
		case I386_SET_LDT:
		case I386_GET_IOPERM:
		case I386_GET_FSBASE:
		case I386_SET_FSBASE:
		case I386_GET_GSBASE:
		case I386_SET_GSBASE:
		case I386_GET_XFPUSTATE:
			break;

		case I386_SET_IOPERM:
		default:
#ifdef KTRACE
			if (KTRPOINT(td, KTR_CAPFAIL))
				ktrcapfail(CAPFAIL_SYSCALL, NULL, NULL);
#endif
			return (ECAPMODE);
		}
	}
#endif

	switch (uap->op) {
	case I386_GET_IOPERM:
	case I386_SET_IOPERM:
		if ((error = copyin(uap->parms, &kargs.iargs,
		    sizeof(struct i386_ioperm_args))) != 0)
			return (error);
		break;
	case I386_GET_LDT:
	case I386_SET_LDT:
		if ((error = copyin(uap->parms, &kargs.largs,
		    sizeof(struct i386_ldt_args))) != 0)
			return (error);
		if (kargs.largs.num > MAX_LD || kargs.largs.num <= 0)
			return (EINVAL);
		break;
	case I386_GET_XFPUSTATE:
		if ((error = copyin(uap->parms, &kargs.xfpu,
		    sizeof(struct i386_get_xfpustate))) != 0)
			return (error);
		break;
	default:
		break;
	}

	switch(uap->op) {
	case I386_GET_LDT:
		error = i386_get_ldt(td, &kargs.largs);
		break;
	case I386_SET_LDT:
		if (kargs.largs.descs != NULL) {
			lp = (union descriptor *)malloc(
			    kargs.largs.num * sizeof(union descriptor),
			    M_TEMP, M_WAITOK);
			error = copyin(kargs.largs.descs, lp,
			    kargs.largs.num * sizeof(union descriptor));
			if (error == 0)
				error = i386_set_ldt(td, &kargs.largs, lp);
			free(lp, M_TEMP);
		} else {
			error = i386_set_ldt(td, &kargs.largs, NULL);
		}
		break;
	case I386_GET_IOPERM:
		error = i386_get_ioperm(td, &kargs.iargs);
		if (error == 0)
			error = copyout(&kargs.iargs, uap->parms,
			    sizeof(struct i386_ioperm_args));
		break;
	case I386_SET_IOPERM:
		error = i386_set_ioperm(td, &kargs.iargs);
		break;
	case I386_VM86:
		error = vm86_sysarch(td, uap->parms);
		break;
	case I386_GET_FSBASE:
		sdp = &td->td_pcb->pcb_fsd;
		base = sdp->sd_hibase << 24 | sdp->sd_lobase;
		error = copyout(&base, uap->parms, sizeof(base));
		break;
	case I386_SET_FSBASE:
		error = copyin(uap->parms, &base, sizeof(base));
		if (error == 0) {
			/*
			 * Construct a descriptor and store it in the pcb for
			 * the next context switch.  Also store it in the gdt
			 * so that the load of tf_fs into %fs will activate it
			 * at return to userland.
			 */
			fill_based_sd(&sd, base);
			critical_enter();
			td->td_pcb->pcb_fsd = sd;
			PCPU_GET(fsgs_gdt)[0] = sd;
			critical_exit();
			td->td_frame->tf_fs = GSEL(GUFS_SEL, SEL_UPL);
		}
		break;
	case I386_GET_GSBASE:
		sdp = &td->td_pcb->pcb_gsd;
		base = sdp->sd_hibase << 24 | sdp->sd_lobase;
		error = copyout(&base, uap->parms, sizeof(base));
		break;
	case I386_SET_GSBASE:
		error = copyin(uap->parms, &base, sizeof(base));
		if (error == 0) {
			/*
			 * Construct a descriptor and store it in the pcb for
			 * the next context switch.  Also store it in the gdt
			 * because we have to do a load_gs() right now.
			 */
			fill_based_sd(&sd, base);
			critical_enter();
			td->td_pcb->pcb_gsd = sd;
			PCPU_GET(fsgs_gdt)[1] = sd;
			critical_exit();
			load_gs(GSEL(GUGS_SEL, SEL_UPL));
		}
		break;
	case I386_GET_XFPUSTATE:
		if (kargs.xfpu.len > cpu_max_ext_state_size -
		    sizeof(union savefpu))
			return (EINVAL);
		npxgetregs(td);
		error = copyout((char *)(get_pcb_user_save_td(td) + 1),
		    kargs.xfpu.addr, kargs.xfpu.len);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

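/*
 * Allocate an extended pcb carrying a private TSS with an i/o permission
 * bitmap and a vm86 interrupt redirection map, then switch this CPU onto
 * the new TSS.  Called when a process first needs the bitmap, e.g. from
 * i386_set_ioperm() below.
 */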
int
i386_extend_pcb(struct thread *td)
{
	int i, offset;
	u_long *addr;
	struct pcb_ext *ext;
	struct soft_segment_descriptor ssd = {
		0,			/* segment base address (overwritten) */
		ctob(IOPAGES + 1) - 1,	/* length */
		SDT_SYS386TSS,		/* segment type */
		0,			/* priority level */
		1,			/* descriptor present */
		0, 0,
		0,			/* default 32 size */
		0			/* granularity */
	};

	ext = (struct pcb_ext *)kmem_malloc(kernel_arena, ctob(IOPAGES+1),
	    M_WAITOK | M_ZERO);
	/* -16 is so we can convert a trapframe into vm86trapframe in place */
	ext->ext_tss.tss_esp0 = (vm_offset_t)td->td_pcb - 16;
	ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	/*
	 * The last byte of the i/o map must be followed by an 0xff byte.
	 * We arbitrarily allocate 16 bytes here, to keep the starting
	 * address on a doubleword boundary.
	 */
	offset = PAGE_SIZE - 16;
	ext->ext_tss.tss_ioopt =
	    (offset - ((unsigned)&ext->ext_tss - (unsigned)ext)) << 16;
	ext->ext_iomap = (caddr_t)ext + offset;
	ext->ext_vm86.vm86_intmap = (caddr_t)ext + offset - 32;

	addr = (u_long *)ext->ext_vm86.vm86_intmap;
	for (i = 0; i < (ctob(IOPAGES) + 32 + 16) / sizeof(u_long); i++)
		*addr++ = ~0;

	ssd.ssd_base = (unsigned)&ext->ext_tss;
	ssd.ssd_limit -= ((unsigned)&ext->ext_tss - (unsigned)ext);
	ssdtosd(&ssd, &ext->ext_tssd);

	KASSERT(td == curthread, ("giving TSS to !curthread"));
	KASSERT(td->td_pcb->pcb_ext == 0, ("already have a TSS!"));

	/* Switch to the new TSS. */
	critical_enter();
	td->td_pcb->pcb_ext = ext;
	PCPU_SET(private_tss, 1);
	*PCPU_GET(tss_gdt) = ext->ext_tssd;
	ltr(GSEL(GPROC0_SEL, SEL_KPL));
	critical_exit();

	return 0;
}

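/*
 * Grant or revoke user access to a range of i/o ports by clearing or
 * setting the corresponding bits in the per-process i/o permission
 * bitmap.  Requires the PRIV_IO privilege and securelevel <= 0.
 */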
int
i386_set_ioperm(td, uap)
	struct thread *td;
	struct i386_ioperm_args *uap;
{
	int i, error;
	char *iomap;

	if ((error = priv_check(td, PRIV_IO)) != 0)
		return (error);
	if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
		return (error);
	/*
	 * XXX
	 * While this is restricted to root, we should probably figure out
	 * whether any other driver is using this i/o address, so as not to
	 * cause confusion.  This probably requires a global 'usage registry'.
	 */

	if (td->td_pcb->pcb_ext == 0)
		if ((error = i386_extend_pcb(td)) != 0)
			return (error);
	iomap = (char *)td->td_pcb->pcb_ext->ext_iomap;

	if (uap->start + uap->length > IOPAGES * PAGE_SIZE * NBBY)
		return (EINVAL);

	for (i = uap->start; i < uap->start + uap->length; i++) {
		if (uap->enable)
			iomap[i >> 3] &= ~(1 << (i & 7));
		else
			iomap[i >> 3] |= (1 << (i & 7));
	}
	return (error);
}

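/*
 * Report the access state of the port range beginning at uap->start:
 * uap->enable is set to whether the first port is accessible and
 * uap->length to the number of consecutive ports in the same state.
 */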
int
i386_get_ioperm(td, uap)
	struct thread *td;
	struct i386_ioperm_args *uap;
{
	int i, state;
	char *iomap;

	if (uap->start >= IOPAGES * PAGE_SIZE * NBBY)
		return (EINVAL);

	if (td->td_pcb->pcb_ext == 0) {
		uap->length = 0;
		goto done;
	}

	iomap = (char *)td->td_pcb->pcb_ext->ext_iomap;

	i = uap->start;
	state = (iomap[i >> 3] >> (i & 7)) & 1;
	uap->enable = !state;
	uap->length = 1;

	for (i = uap->start + 1; i < IOPAGES * PAGE_SIZE * NBBY; i++) {
		if (state != ((iomap[i >> 3] >> (i & 7)) & 1))
			break;
		uap->length++;
	}

done:
	return (0);
}

/*
 * Update the GDT entry pointing to the LDT to point to the LDT of the
 * current process.  dt_lock is acquired here if the caller does not
 * already hold it, and released again before returning in that case.
 */
void
set_user_ldt(struct mdproc *mdp)
{
	struct proc_ldt *pldt;
	int dtlocked;

	dtlocked = 0;
	if (!mtx_owned(&dt_lock)) {
		mtx_lock_spin(&dt_lock);
		dtlocked = 1;
	}

	pldt = mdp->md_ldt;
#ifdef SMP
	gdt[PCPU_GET(cpuid) * NGDT + GUSERLDT_SEL].sd = pldt->ldt_sd;
#else
	gdt[GUSERLDT_SEL].sd = pldt->ldt_sd;
#endif
	lldt(GSEL(GUSERLDT_SEL, SEL_KPL));
	PCPU_SET(currentldt, GSEL(GUSERLDT_SEL, SEL_KPL));
	if (dtlocked)
		mtx_unlock_spin(&dt_lock);
}

#ifdef SMP
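/*
 * smp_rendezvous() callback: reload the LDT on this CPU if the current
 * thread still belongs to the vmspace whose LDT was just replaced.
 */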
static void
set_user_ldt_rv(struct vmspace *vmsp)
{
	struct thread *td;

	td = curthread;
	if (vmsp != td->td_proc->p_vmspace)
		return;

	set_user_ldt(&td->td_proc->p_md);
}
#endif

/*
 * dt_lock must be held. Returns with dt_lock held.
 */
struct proc_ldt *
user_ldt_alloc(struct mdproc *mdp, int len)
{
	struct proc_ldt *pldt, *new_ldt;

	mtx_assert(&dt_lock, MA_OWNED);
	mtx_unlock_spin(&dt_lock);
	new_ldt = malloc(sizeof(struct proc_ldt),
		M_SUBPROC, M_WAITOK);

	new_ldt->ldt_len = len = NEW_MAX_LD(len);
	new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_arena,
	    len * sizeof(union descriptor), M_WAITOK);
	new_ldt->ldt_refcnt = 1;
	new_ldt->ldt_active = 0;

	mtx_lock_spin(&dt_lock);
	gdt_segs[GUSERLDT_SEL].ssd_base = (unsigned)new_ldt->ldt_base;
	gdt_segs[GUSERLDT_SEL].ssd_limit = len * sizeof(union descriptor) - 1;
	ssdtosd(&gdt_segs[GUSERLDT_SEL], &new_ldt->ldt_sd);

	if ((pldt = mdp->md_ldt) != NULL) {
		if (len > pldt->ldt_len)
			len = pldt->ldt_len;
		bcopy(pldt->ldt_base, new_ldt->ldt_base,
		    len * sizeof(union descriptor));
	} else
		bcopy(ldt, new_ldt->ldt_base, sizeof(ldt));

	return (new_ldt);
}

/*
 * Must be called with dt_lock held.  Returns with dt_lock unheld.
 */
void
user_ldt_free(struct thread *td)
{
	struct mdproc *mdp = &td->td_proc->p_md;
	struct proc_ldt *pldt;

	mtx_assert(&dt_lock, MA_OWNED);
	if ((pldt = mdp->md_ldt) == NULL) {
		mtx_unlock_spin(&dt_lock);
		return;
	}

	if (td == curthread) {
		lldt(_default_ldt);
		PCPU_SET(currentldt, _default_ldt);
	}

	mdp->md_ldt = NULL;
	user_ldt_deref(pldt);
}

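/*
 * Drop a reference to an LDT and free it when the last reference goes
 * away.  Must be called with dt_lock held; returns with it unlocked.
 */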
void
user_ldt_deref(struct proc_ldt *pldt)
{

	mtx_assert(&dt_lock, MA_OWNED);
	if (--pldt->ldt_refcnt == 0) {
		mtx_unlock_spin(&dt_lock);
		kmem_free(kernel_arena, (vm_offset_t)pldt->ldt_base,
			pldt->ldt_len * sizeof(union descriptor));
		free(pldt, M_SUBPROC);
	} else
		mtx_unlock_spin(&dt_lock);
}

/*
 * Note for the authors of compat layers (linux, etc): copyout() in
 * the function below is not a problem since it presents data in
 * arch-specific format (i.e. i386-specific in this case), not in
 * the OS-specific one.
 */
int
i386_get_ldt(td, uap)
	struct thread *td;
	struct i386_ldt_args *uap;
{
	int error = 0;
	struct proc_ldt *pldt;
	int nldt, num;
	union descriptor *lp;

#ifdef	DEBUG
	printf("i386_get_ldt: start=%d num=%d descs=%p\n",
	    uap->start, uap->num, (void *)uap->descs);
#endif

	mtx_lock_spin(&dt_lock);
	if ((pldt = td->td_proc->p_md.md_ldt) != NULL) {
		nldt = pldt->ldt_len;
		lp = &((union descriptor *)(pldt->ldt_base))[uap->start];
		mtx_unlock_spin(&dt_lock);
		num = min(uap->num, nldt);
	} else {
		mtx_unlock_spin(&dt_lock);
		nldt = sizeof(ldt)/sizeof(ldt[0]);
		num = min(uap->num, nldt);
		lp = &ldt[uap->start];
	}

	if ((uap->start > (unsigned int)nldt) ||
	    ((unsigned int)num > (unsigned int)nldt) ||
	    ((unsigned int)(uap->start + num) > (unsigned int)nldt))
		return(EINVAL);

	error = copyout(lp, uap->descs, num * sizeof(union descriptor));
	if (!error)
		td->td_retval[0] = num;

	return(error);
}

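/*
 * Install user-supplied descriptors into the process LDT, growing (or
 * allocating) the LDT as needed.  A NULL descs pointer frees the
 * requested range instead; start == LDT_AUTO_ALLOC with num == 1 asks
 * the kernel to pick a free slot, which is returned in td_retval[0].
 */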
int
i386_set_ldt(td, uap, descs)
	struct thread *td;
	struct i386_ldt_args *uap;
	union descriptor *descs;
{
	int error = 0, i;
	int largest_ld;
	struct mdproc *mdp = &td->td_proc->p_md;
	struct proc_ldt *pldt;
	union descriptor *dp;

#ifdef	DEBUG
	printf("i386_set_ldt: start=%d num=%d descs=%p\n",
	    uap->start, uap->num, (void *)uap->descs);
#endif

	if (descs == NULL) {
		/* Free descriptors */
		if (uap->start == 0 && uap->num == 0) {
			/*
			 * Treat this as a special case, so userland needn't
			 * know magic number NLDT.
			 */
			uap->start = NLDT;
			uap->num = MAX_LD - NLDT;
		}
		if (uap->num == 0)
			return (EINVAL);
		mtx_lock_spin(&dt_lock);
		if ((pldt = mdp->md_ldt) == NULL ||
		    uap->start >= pldt->ldt_len) {
			mtx_unlock_spin(&dt_lock);
			return (0);
		}
		largest_ld = uap->start + uap->num;
		if (largest_ld > pldt->ldt_len)
			largest_ld = pldt->ldt_len;
		i = largest_ld - uap->start;
		bzero(&((union descriptor *)(pldt->ldt_base))[uap->start],
		    sizeof(union descriptor) * i);
		mtx_unlock_spin(&dt_lock);
		return (0);
	}

	if (!(uap->start == LDT_AUTO_ALLOC && uap->num == 1)) {
		/* verify range of descriptors to modify */
		largest_ld = uap->start + uap->num;
		if (uap->start >= MAX_LD || largest_ld > MAX_LD) {
			return (EINVAL);
		}
	}

	/* Check descriptors for access violations */
	for (i = 0; i < uap->num; i++) {
		dp = &descs[i];

		switch (dp->sd.sd_type) {
		case SDT_SYSNULL:	/* system null */
			dp->sd.sd_p = 0;
			break;
		case SDT_SYS286TSS: /* system 286 TSS available */
		case SDT_SYSLDT:    /* system local descriptor table */
		case SDT_SYS286BSY: /* system 286 TSS busy */
		case SDT_SYSTASKGT: /* system task gate */
		case SDT_SYS286IGT: /* system 286 interrupt gate */
		case SDT_SYS286TGT: /* system 286 trap gate */
		case SDT_SYSNULL2:  /* undefined by Intel */
		case SDT_SYS386TSS: /* system 386 TSS available */
		case SDT_SYSNULL3:  /* undefined by Intel */
		case SDT_SYS386BSY: /* system 386 TSS busy */
		case SDT_SYSNULL4:  /* undefined by Intel */
		case SDT_SYS386IGT: /* system 386 interrupt gate */
		case SDT_SYS386TGT: /* system 386 trap gate */
		case SDT_SYS286CGT: /* system 286 call gate */
		case SDT_SYS386CGT: /* system 386 call gate */
			/* I can't think of any reason to allow a user proc
			 * to create a segment of these types.  They are
			 * for OS use only.
			 */
			return (EACCES);
			/*NOTREACHED*/

		/* memory segment types */
		case SDT_MEMEC:   /* memory execute only conforming */
		case SDT_MEMEAC:  /* memory execute only accessed conforming */
		case SDT_MEMERC:  /* memory execute read conforming */
		case SDT_MEMERAC: /* memory execute read accessed conforming */
			 /* Must be "present" if executable and conforming. */
			if (dp->sd.sd_p == 0)
				return (EACCES);
			break;
		case SDT_MEMRO:   /* memory read only */
		case SDT_MEMROA:  /* memory read only accessed */
		case SDT_MEMRW:   /* memory read write */
		case SDT_MEMRWA:  /* memory read write accessed */
		case SDT_MEMROD:  /* memory read only expand dwn limit */
		case SDT_MEMRODA: /* memory read only expand dwn lim accessed */
		case SDT_MEMRWD:  /* memory read write expand dwn limit */
		case SDT_MEMRWDA: /* memory read write expand dwn lim accessed */
		case SDT_MEME:    /* memory execute only */
		case SDT_MEMEA:   /* memory execute only accessed */
		case SDT_MEMER:   /* memory execute read */
		case SDT_MEMERA:  /* memory execute read accessed */
			break;
		default:
			return(EINVAL);
			/*NOTREACHED*/
		}

		/* Only user (ring-3) descriptors may be present. */
		if ((dp->sd.sd_p != 0) && (dp->sd.sd_dpl != SEL_UPL))
			return (EACCES);
	}

	if (uap->start == LDT_AUTO_ALLOC && uap->num == 1) {
		/* Allocate a free slot */
		mtx_lock_spin(&dt_lock);
		if ((pldt = mdp->md_ldt) == NULL) {
			if ((error = i386_ldt_grow(td, NLDT + 1))) {
				mtx_unlock_spin(&dt_lock);
				return (error);
			}
			pldt = mdp->md_ldt;
		}
again:
		/*
		 * start scanning a bit up to leave room for NVidia and
		 * Wine, which still use the "Blat" method of allocation.
		 */
		dp = &((union descriptor *)(pldt->ldt_base))[NLDT];
		for (i = NLDT; i < pldt->ldt_len; ++i) {
			if (dp->sd.sd_type == SDT_SYSNULL)
				break;
			dp++;
		}
		if (i >= pldt->ldt_len) {
			if ((error = i386_ldt_grow(td, pldt->ldt_len+1))) {
				mtx_unlock_spin(&dt_lock);
				return (error);
			}
			goto again;
		}
		uap->start = i;
		error = i386_set_ldt_data(td, i, 1, descs);
		mtx_unlock_spin(&dt_lock);
	} else {
		largest_ld = uap->start + uap->num;
		mtx_lock_spin(&dt_lock);
		if (!(error = i386_ldt_grow(td, largest_ld))) {
			error = i386_set_ldt_data(td, uap->start, uap->num,
			    descs);
		}
		mtx_unlock_spin(&dt_lock);
	}
	if (error == 0)
		td->td_retval[0] = uap->start;
	return (error);
}

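/*
 * Copy a run of descriptors into the process LDT.  The caller must hold
 * dt_lock and have already validated the descriptors and grown the LDT
 * as necessary.
 */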
static int
i386_set_ldt_data(struct thread *td, int start, int num,
	union descriptor *descs)
{
	struct mdproc *mdp = &td->td_proc->p_md;
	struct proc_ldt *pldt = mdp->md_ldt;

	mtx_assert(&dt_lock, MA_OWNED);

	/* Fill in range */
	bcopy(descs,
	    &((union descriptor *)(pldt->ldt_base))[start],
	    num * sizeof(union descriptor));
	return (0);
}

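/*
 * Ensure the process LDT can hold at least len descriptors, replacing it
 * with a larger copy if necessary and making the other CPUs reload it.
 * Called with dt_lock held; returns with dt_lock held.
 */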
static int
i386_ldt_grow(struct thread *td, int len)
{
	struct mdproc *mdp = &td->td_proc->p_md;
	struct proc_ldt *new_ldt, *pldt;
	caddr_t old_ldt_base = NULL_LDT_BASE;
	int old_ldt_len = 0;

	mtx_assert(&dt_lock, MA_OWNED);

	if (len > MAX_LD)
		return (ENOMEM);
	if (len < NLDT + 1)
		len = NLDT + 1;

	/* Allocate a user ldt. */
	if ((pldt = mdp->md_ldt) == NULL || len > pldt->ldt_len) {
		new_ldt = user_ldt_alloc(mdp, len);
		if (new_ldt == NULL)
			return (ENOMEM);
		pldt = mdp->md_ldt;

		if (pldt != NULL) {
			if (new_ldt->ldt_len <= pldt->ldt_len) {
				/*
				 * We just lost the race for allocation, so
				 * free the new object and return.
				 */
				mtx_unlock_spin(&dt_lock);
				kmem_free(kernel_arena,
				   (vm_offset_t)new_ldt->ldt_base,
				   new_ldt->ldt_len * sizeof(union descriptor));
				free(new_ldt, M_SUBPROC);
				mtx_lock_spin(&dt_lock);
				return (0);
			}

			/*
			 * We have to substitute the current LDT entry for
			 * curproc with the new one since its size grew.
			 */
			old_ldt_base = pldt->ldt_base;
			old_ldt_len = pldt->ldt_len;
			pldt->ldt_sd = new_ldt->ldt_sd;
			pldt->ldt_base = new_ldt->ldt_base;
			pldt->ldt_len = new_ldt->ldt_len;
		} else
			mdp->md_ldt = pldt = new_ldt;
#ifdef SMP
		/*
		 * Signal other cpus to reload ldt.  We need to unlock dt_lock
		 * here because the other CPUs will contend for it: their
		 * curthreads do not hold the lock and would block trying to
		 * acquire it.
		 */
		mtx_unlock_spin(&dt_lock);
		smp_rendezvous(NULL, (void (*)(void *))set_user_ldt_rv,
		    NULL, td->td_proc->p_vmspace);
#else
		set_user_ldt(&td->td_proc->p_md);
		mtx_unlock_spin(&dt_lock);
#endif
		if (old_ldt_base != NULL_LDT_BASE) {
			kmem_free(kernel_arena, (vm_offset_t)old_ldt_base,
			    old_ldt_len * sizeof(union descriptor));
			free(new_ldt, M_SUBPROC);
		}
		mtx_lock_spin(&dt_lock);
	}
	return (0);
}