xref: /netbsd/sys/arch/x86/x86/sys_machdep.c (revision 2c253072)
1 /*	$NetBSD: sys_machdep.c,v 1.58 2022/08/20 23:49:31 riastradh Exp $	*/
2 
3 /*
4  * Copyright (c) 1998, 2007, 2009, 2017 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Charles M. Hannum, by Andrew Doran, and by Maxime Villard.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: sys_machdep.c,v 1.58 2022/08/20 23:49:31 riastradh Exp $");
34 
35 #include "opt_mtrr.h"
36 #include "opt_user_ldt.h"
37 #include "opt_compat_netbsd.h"
38 #include "opt_xen.h"
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/ioctl.h>
43 #include <sys/file.h>
44 #include <sys/time.h>
45 #include <sys/proc.h>
46 #include <sys/uio.h>
47 #include <sys/kernel.h>
48 #include <sys/buf.h>
49 #include <sys/signal.h>
50 #include <sys/malloc.h>
51 #include <sys/kmem.h>
52 #include <sys/kauth.h>
53 #include <sys/cpu.h>
54 #include <sys/mount.h>
55 #include <sys/syscallargs.h>
56 
57 #include <uvm/uvm_extern.h>
58 
59 #include <machine/cpufunc.h>
60 #include <machine/gdt.h>
61 #include <machine/psl.h>
62 #include <machine/reg.h>
63 #include <machine/sysarch.h>
64 #include <machine/mtrr.h>
65 #include <machine/pmap_private.h>
66 
67 #if defined(__x86_64__) || defined(XENPV)
68 #undef	IOPERM	/* not implemented */
69 #else
70 #define	IOPERM
71 #endif
72 
73 #if defined(XENPV) && defined(USER_LDT)
74 #error "USER_LDT not supported on XENPV"
75 #endif
76 
77 extern struct vm_map *kernel_map;
78 
79 static int x86_get_ioperm(struct lwp *, void *, register_t *);
80 static int x86_set_ioperm(struct lwp *, void *, register_t *);
81 static int x86_set_sdbase32(void *, char, lwp_t *, bool);
82 int x86_set_sdbase(void *, char, lwp_t *, bool);
83 static int x86_get_sdbase32(void *, char);
84 int x86_get_sdbase(void *, char);
85 
86 #ifdef i386
87 static int
88 x86_get_ldt(struct lwp *l, void *args, register_t *retval)
89 {
90 #ifndef USER_LDT
91 	return EINVAL;
92 #else
93 	struct x86_get_ldt_args ua;
94 	union descriptor *cp;
95 	int error;
96 
97 	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
98 		return error;
99 
100 	if (ua.num < 0 || ua.num > MAX_USERLDT_SLOTS)
101 		return EINVAL;
102 
103 	cp = malloc(ua.num * sizeof(union descriptor), M_TEMP, M_WAITOK);
104 	if (cp == NULL)
105 		return ENOMEM;
106 
107 	error = x86_get_ldt1(l, &ua, cp);
108 	*retval = ua.num;
109 	if (error == 0)
110 		error = copyout(cp, ua.desc, ua.num * sizeof(*cp));
111 
112 	free(cp, M_TEMP);
113 	return error;
114 #endif
115 }
116 #endif
117 
118 int
119 x86_get_ldt1(struct lwp *l, struct x86_get_ldt_args *ua, union descriptor *cp)
120 {
121 #ifndef USER_LDT
122 	return EINVAL;
123 #else
124 	int error;
125 	struct proc *p = l->l_proc;
126 	pmap_t pmap = p->p_vmspace->vm_map.pmap;
127 	int nldt, num;
128 	union descriptor *lp;
129 
130 #ifdef __x86_64__
131 	const size_t min_ldt_size = LDT_SIZE;
132 #else
133 	const size_t min_ldt_size = NLDT * sizeof(union descriptor);
134 #endif
135 
136 	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_LDT_GET,
137 	    NULL, NULL, NULL, NULL);
138 	if (error)
139 		return error;
140 
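	/*
	 * start and num are each bounded by MAX_USERLDT_SLOTS before the
	 * sum is tested, so the addition below cannot overflow.
	 */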
141 	if (ua->start < 0 || ua->num < 0 ||
142 	    ua->start > MAX_USERLDT_SLOTS || ua->num > MAX_USERLDT_SLOTS ||
143 	    ua->start + ua->num > MAX_USERLDT_SLOTS)
144 		return EINVAL;
145 
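	/*
	 * The first min_ldt_size bytes of an LDT hold the system-provided
	 * default descriptors; user requests may only address slots above
	 * them.
	 */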
146 	if (ua->start * sizeof(union descriptor) < min_ldt_size)
147 		return EINVAL;
148 
149 	mutex_enter(&cpu_lock);
150 
151 	if (pmap->pm_ldt != NULL) {
152 		nldt = MAX_USERLDT_SIZE / sizeof(*lp);
153 		lp = pmap->pm_ldt;
154 	} else {
155 #ifdef __x86_64__
156 		nldt = LDT_SIZE / sizeof(*lp);
157 #else
158 		nldt = NLDT;
159 #endif
160 		lp = (union descriptor *)ldtstore;
161 	}
162 
163 	if (ua->start > nldt) {
164 		mutex_exit(&cpu_lock);
165 		return EINVAL;
166 	}
167 
168 	lp += ua->start;
169 	num = uimin(ua->num, nldt - ua->start);
170 	ua->num = num;
171 
172 	memcpy(cp, lp, num * sizeof(union descriptor));
173 	mutex_exit(&cpu_lock);
174 
175 	return 0;
176 #endif
177 }
178 
179 #ifdef i386
180 static int
181 x86_set_ldt(struct lwp *l, void *args, register_t *retval)
182 {
183 #ifndef USER_LDT
184 	return EINVAL;
185 #else
186 	struct x86_set_ldt_args ua;
187 	union descriptor *descv;
188 	int error;
189 
190 	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
191 		return error;
192 
193 	if (ua.num < 0 || ua.num > MAX_USERLDT_SLOTS)
194 		return EINVAL;
195 
196 	descv = malloc(sizeof (*descv) * ua.num, M_TEMP, M_WAITOK);
197 	error = copyin(ua.desc, descv, sizeof (*descv) * ua.num);
198 	if (error == 0)
199 		error = x86_set_ldt1(l, &ua, descv);
200 	*retval = ua.start;
201 
202 	free(descv, M_TEMP);
203 	return error;
204 #endif
205 }
206 #endif
207 
208 int
209 x86_set_ldt1(struct lwp *l, struct x86_set_ldt_args *ua,
210     union descriptor *descv)
211 {
212 #ifndef USER_LDT
213 	return EINVAL;
214 #else
215 	int error, i, n, old_sel, new_sel;
216 	struct proc *p = l->l_proc;
217 	pmap_t pmap = p->p_vmspace->vm_map.pmap;
218 	union descriptor *old_ldt, *new_ldt;
219 
220 #ifdef __x86_64__
221 	const size_t min_ldt_size = LDT_SIZE;
222 #else
223 	const size_t min_ldt_size = NLDT * sizeof(union descriptor);
224 #endif
225 
226 	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_LDT_SET,
227 	    NULL, NULL, NULL, NULL);
228 	if (error)
229 		return error;
230 
231 	if (ua->start < 0 || ua->num < 0 ||
232 	    ua->start > MAX_USERLDT_SLOTS || ua->num > MAX_USERLDT_SLOTS ||
233 	    ua->start + ua->num > MAX_USERLDT_SLOTS)
234 		return EINVAL;
235 
236 	if (ua->start * sizeof(union descriptor) < min_ldt_size)
237 		return EINVAL;
238 
239 	/* Check descriptors for access violations. */
240 	for (i = 0; i < ua->num; i++) {
241 		union descriptor *desc = &descv[i];
242 
243 #ifdef __x86_64__
244 		if (desc->sd.sd_long != 0)
245 			return EACCES;
246 #endif
247 
248 		switch (desc->sd.sd_type) {
249 		case SDT_SYSNULL:
250 			desc->sd.sd_p = 0;
251 			break;
252 		case SDT_MEMEC:
253 		case SDT_MEMEAC:
254 		case SDT_MEMERC:
255 		case SDT_MEMERAC:
256 			/* Must be "present" if executable and conforming. */
257 			if (desc->sd.sd_p == 0)
258 				return EACCES;
259 			break;
260 		case SDT_MEMRO:
261 		case SDT_MEMROA:
262 		case SDT_MEMRW:
263 		case SDT_MEMRWA:
264 		case SDT_MEMROD:
265 		case SDT_MEMRODA:
266 		case SDT_MEMRWD:
267 		case SDT_MEMRWDA:
268 		case SDT_MEME:
269 		case SDT_MEMEA:
270 		case SDT_MEMER:
271 		case SDT_MEMERA:
272 			break;
273 		default:
274 			return EACCES;
275 		}
276 
277 		if (desc->sd.sd_p != 0) {
278 			/* Only user (ring-3) descriptors may be present. */
279 			if (desc->sd.sd_dpl != SEL_UPL)
280 				return EACCES;
281 		}
282 	}
283 
284 	/*
285 	 * Install selected changes.
286 	 */
287 
288 	/* Allocate a new LDT. */
289 	new_ldt = (union descriptor *)uvm_km_alloc(kernel_map,
290 	    MAX_USERLDT_SIZE, 0, UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_WAITVA);
291 
292 	mutex_enter(&cpu_lock);
293 
294 	/* Copy existing entries, if any. */
295 	if (pmap->pm_ldt != NULL) {
296 		old_ldt = pmap->pm_ldt;
297 		old_sel = pmap->pm_ldt_sel;
298 		memcpy(new_ldt, old_ldt, MAX_USERLDT_SIZE);
299 	} else {
300 		old_ldt = NULL;
301 		old_sel = -1;
302 		memcpy(new_ldt, ldtstore, min_ldt_size);
303 	}
304 
305 	/* Apply requested changes. */
306 	for (i = 0, n = ua->start; i < ua->num; i++, n++) {
307 		new_ldt[n] = descv[i];
308 	}
309 
310 	/* Allocate LDT selector. */
311 	new_sel = ldt_alloc(new_ldt, MAX_USERLDT_SIZE);
312 	if (new_sel == -1) {
313 		mutex_exit(&cpu_lock);
314 		uvm_km_free(kernel_map, (vaddr_t)new_ldt, MAX_USERLDT_SIZE,
315 		    UVM_KMF_WIRED);
316 		return ENOMEM;
317 	}
318 
319 	/* All changes are now globally visible.  Swap in the new LDT. */
320 	atomic_store_relaxed(&pmap->pm_ldt_sel, new_sel);
321 	/* membar_store_store for pmap_fork() to read these unlocked safely */
322 	membar_producer();
323 	atomic_store_relaxed(&pmap->pm_ldt, new_ldt);
324 
325 	/* Switch existing users onto new LDT. */
326 	pmap_ldt_sync(pmap);
327 
328 	/* Free existing LDT (if any). */
329 	if (old_ldt != NULL) {
330 		ldt_free(old_sel);
331 		/* Drop cpu_lock before freeing the old LDT. */
332 		mutex_exit(&cpu_lock);
333 		uvm_km_free(kernel_map, (vaddr_t)old_ldt, MAX_USERLDT_SIZE,
334 		    UVM_KMF_WIRED);
335 	} else {
336 		mutex_exit(&cpu_lock);
337 	}
338 
339 	return error;
340 #endif
341 }
342 
343 int
344 x86_iopl(struct lwp *l, void *args, register_t *retval)
345 {
346 	int error;
347 	struct x86_iopl_args ua;
348 #ifdef XENPV
349 	int iopl;
350 #else
351 	struct trapframe *tf = l->l_md.md_regs;
352 #endif
353 
354 	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPL,
355 	    NULL, NULL, NULL, NULL);
356 	if (error)
357 		return error;
358 
359 	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
360 		return error;
361 
362 #ifdef XENPV
363 	if (ua.iopl)
364 		iopl = SEL_UPL;
365 	else
366 		iopl = SEL_KPL;
367 
368     {
369 	struct pcb *pcb;
370 
371 	pcb = lwp_getpcb(l);
372 	pcb->pcb_iopl = iopl;
373 
374 	/* Force the change at ring 0. */
375 	struct physdev_set_iopl set_iopl;
376 	set_iopl.iopl = iopl;
377 	HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
378     }
379 #elif defined(__x86_64__)
380 	if (ua.iopl)
381 		tf->tf_rflags |= PSL_IOPL;
382 	else
383 		tf->tf_rflags &= ~PSL_IOPL;
384 #else
385 	if (ua.iopl)
386 		tf->tf_eflags |= PSL_IOPL;
387 	else
388 		tf->tf_eflags &= ~PSL_IOPL;
389 #endif
390 
391 	return 0;
392 }
393 
394 static int
395 x86_get_ioperm(struct lwp *l, void *args, register_t *retval)
396 {
397 #ifdef IOPERM
398 	int error;
399 	struct pcb *pcb = lwp_getpcb(l);
400 	struct x86_get_ioperm_args ua;
401 	void *dummymap = NULL;
402 	void *iomap;
403 
404 	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPERM_GET,
405 	    NULL, NULL, NULL, NULL);
406 	if (error)
407 		return error;
408 
409 	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
410 		return error;
411 
412 	iomap = pcb->pcb_iomap;
413 	if (iomap == NULL) {
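		/*
		 * No I/O bitmap installed for this lwp: hand back an
		 * all-ones map, which in the TSS I/O permission bitmap
		 * means every port is denied.
		 */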
414 		iomap = dummymap = kmem_alloc(IOMAPSIZE, KM_SLEEP);
415 		memset(dummymap, 0xff, IOMAPSIZE);
416 	}
417 	error = copyout(iomap, ua.iomap, IOMAPSIZE);
418 	if (dummymap != NULL) {
419 		kmem_free(dummymap, IOMAPSIZE);
420 	}
421 	return error;
422 #else
423 	return EINVAL;
424 #endif
425 }
426 
427 static int
428 x86_set_ioperm(struct lwp *l, void *args, register_t *retval)
429 {
430 #ifdef IOPERM
431 	struct cpu_info *ci;
432 	int error;
433 	struct pcb *pcb = lwp_getpcb(l);
434 	struct x86_set_ioperm_args ua;
435 	void *new;
436 	void *old;
437 
438 	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPERM_SET,
439 	    NULL, NULL, NULL, NULL);
440 	if (error)
441 		return error;
442 
443 	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
444 		return error;
445 
446 	new = kmem_alloc(IOMAPSIZE, KM_SLEEP);
447 	error = copyin(ua.iomap, new, IOMAPSIZE);
448 	if (error) {
449 		kmem_free(new, IOMAPSIZE);
450 		return error;
451 	}
452 	old = pcb->pcb_iomap;
453 	pcb->pcb_iomap = new;
454 	if (old != NULL) {
455 		kmem_free(old, IOMAPSIZE);
456 	}
457 
458 	CTASSERT(offsetof(struct cpu_tss, iomap) -
459 	    offsetof(struct cpu_tss, tss) == IOMAP_VALIDOFF);
460 
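	/*
	 * Install the new bitmap into the current CPU's TSS.  tss_iobase
	 * carries the bitmap offset in its upper 16 bits, which the
	 * CTASSERT above ties to the iomap field's position in cpu_tss.
	 */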
461 	kpreempt_disable();
462 	ci = curcpu();
463 	memcpy(ci->ci_tss->iomap, pcb->pcb_iomap, IOMAPSIZE);
464 	ci->ci_tss->tss.tss_iobase = IOMAP_VALIDOFF << 16;
465 	kpreempt_enable();
466 
467 	return error;
468 #else
469 	return EINVAL;
470 #endif
471 }
472 
473 static int
474 x86_get_mtrr(struct lwp *l, void *args, register_t *retval)
475 {
476 #ifdef MTRR
477 	struct x86_get_mtrr_args ua;
478 	int error, n;
479 
480 	if (mtrr_funcs == NULL)
481 		return ENOSYS;
482 
483 	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_GET,
484 	    NULL, NULL, NULL, NULL);
485 	if (error)
486 		return error;
487 
488 	error = copyin(args, &ua, sizeof ua);
489 	if (error != 0)
490 		return error;
491 
492 	error = copyin(ua.n, &n, sizeof n);
493 	if (error != 0)
494 		return error;
495 
496 	KERNEL_LOCK(1, NULL);
497 	error = mtrr_get(ua.mtrrp, &n, l->l_proc, MTRR_GETSET_USER);
498 	KERNEL_UNLOCK_ONE(NULL);
499 
500 	copyout(&n, ua.n, sizeof (int));
501 
502 	return error;
503 #else
504 	return EINVAL;
505 #endif
506 }
507 
508 static int
509 x86_set_mtrr(struct lwp *l, void *args, register_t *retval)
510 {
511 #ifdef MTRR
512 	int error, n;
513 	struct x86_set_mtrr_args ua;
514 
515 	if (mtrr_funcs == NULL)
516 		return ENOSYS;
517 
518 	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_SET,
519 	    NULL, NULL, NULL, NULL);
520 	if (error)
521 		return error;
522 
523 	error = copyin(args, &ua, sizeof ua);
524 	if (error != 0)
525 		return error;
526 
527 	error = copyin(ua.n, &n, sizeof n);
528 	if (error != 0)
529 		return error;
530 
531 	KERNEL_LOCK(1, NULL);
532 	error = mtrr_set(ua.mtrrp, &n, l->l_proc, MTRR_GETSET_USER);
533 	if (n != 0)
534 		mtrr_commit();
535 	KERNEL_UNLOCK_ONE(NULL);
536 
537 	copyout(&n, ua.n, sizeof n);
538 
539 	return error;
540 #else
541 	return EINVAL;
542 #endif
543 }
544 
545 #ifdef __x86_64__
546 #define pcb_fsd pcb_fs
547 #define pcb_gsd pcb_gs
548 #define segment_descriptor mem_segment_descriptor
549 #endif
550 
551 static int
552 x86_set_sdbase32(void *arg, char which, lwp_t *l, bool direct)
553 {
554 	struct trapframe *tf = l->l_md.md_regs;
555 	union descriptor usd;
556 	struct pcb *pcb;
557 	uint32_t base;
558 	int error;
559 
560 	if (direct) {
561 		base = (vaddr_t)arg;
562 	} else {
563 		error = copyin(arg, &base, sizeof(base));
564 		if (error != 0)
565 			return error;
566 	}
567 
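	/*
	 * Build a flat ring-3 read/write data segment at the requested
	 * base: limit 0xfffff with 4KB granularity (4GB), 32-bit default
	 * operand size.
	 */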
568 	memset(&usd, 0, sizeof(usd));
569 	usd.sd.sd_lobase = base & 0xffffff;
570 	usd.sd.sd_hibase = (base >> 24) & 0xff;
571 	usd.sd.sd_lolimit = 0xffff;
572 	usd.sd.sd_hilimit = 0xf;
573 	usd.sd.sd_type = SDT_MEMRWA;
574 	usd.sd.sd_dpl = SEL_UPL;
575 	usd.sd.sd_p = 1;
576 	usd.sd.sd_def32 = 1;
577 	usd.sd.sd_gran = 1;
578 
579 	pcb = lwp_getpcb(l);
580 	kpreempt_disable();
581 	if (which == 'f') {
582 		memcpy(&pcb->pcb_fsd, &usd.sd,
583 		    sizeof(struct segment_descriptor));
584 		if (l == curlwp) {
585 			update_descriptor(&curcpu()->ci_gdt[GUFS_SEL], &usd);
586 		}
587 		tf->tf_fs = GSEL(GUFS_SEL, SEL_UPL);
588 	} else /* which == 'g' */ {
589 		memcpy(&pcb->pcb_gsd, &usd.sd,
590 		    sizeof(struct segment_descriptor));
591 		if (l == curlwp) {
592 			update_descriptor(&curcpu()->ci_gdt[GUGS_SEL], &usd);
593 #if defined(__x86_64__) && defined(XENPV)
594 			setusergs(GSEL(GUGS_SEL, SEL_UPL));
595 #endif
596 		}
597 		tf->tf_gs = GSEL(GUGS_SEL, SEL_UPL);
598 	}
599 	kpreempt_enable();
600 	return 0;
601 }
602 
603 int
604 x86_set_sdbase(void *arg, char which, lwp_t *l, bool direct)
605 {
606 #ifdef i386
607 	return x86_set_sdbase32(arg, which, l, direct);
608 #else
609 	struct pcb *pcb;
610 	vaddr_t base;
611 
612 	if (l->l_proc->p_flag & PK_32) {
613 		return x86_set_sdbase32(arg, which, l, direct);
614 	}
615 
616 	if (direct) {
617 		base = (vaddr_t)arg;
618 	} else {
619 		int error = copyin(arg, &base, sizeof(base));
620 		if (error != 0)
621 			return error;
622 	}
623 
624 	if (base >= VM_MAXUSER_ADDRESS)
625 		return EINVAL;
626 
627 	pcb = lwp_getpcb(l);
628 
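	/*
	 * On amd64 the segment bases live in MSRs.  MSR_KERNELGSBASE holds
	 * the user %gs base while in the kernel; swapgs exchanges it with
	 * the kernel's on return to user mode.
	 */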
629 	kpreempt_disable();
630 	switch (which) {
631 	case 'f':
632 		pcb->pcb_fs = base;
633 		if (l == curlwp)
634 			wrmsr(MSR_FSBASE, pcb->pcb_fs);
635 		break;
636 	case 'g':
637 		pcb->pcb_gs = base;
638 		if (l == curlwp)
639 			wrmsr(MSR_KERNELGSBASE, pcb->pcb_gs);
640 		break;
641 	default:
642 		panic("x86_set_sdbase");
643 	}
644 	kpreempt_enable();
645 
646 	return 0;
647 #endif
648 }
649 
650 static int
651 x86_get_sdbase32(void *arg, char which)
652 {
653 	struct segment_descriptor *sd;
654 	uint32_t base;
655 
656 	switch (which) {
657 	case 'f':
658 		sd = (void *)&curpcb->pcb_fsd;
659 		break;
660 	case 'g':
661 		sd = (void *)&curpcb->pcb_gsd;
662 		break;
663 	default:
664 		panic("x86_get_sdbase32");
665 	}
666 
667 	base = sd->sd_hibase << 24 | sd->sd_lobase;
668 	return copyout(&base, arg, sizeof(base));
669 }
670 
671 int
672 x86_get_sdbase(void *arg, char which)
673 {
674 #ifdef i386
675 	return x86_get_sdbase32(arg, which);
676 #else
677 	vaddr_t base;
678 	struct pcb *pcb;
679 
680 	if (curproc->p_flag & PK_32) {
681 		return x86_get_sdbase32(arg, which);
682 	}
683 
684 	pcb = lwp_getpcb(curlwp);
685 
686 	switch (which) {
687 	case 'f':
688 		base = pcb->pcb_fs;
689 		break;
690 	case 'g':
691 		base = pcb->pcb_gs;
692 		break;
693 	default:
694 		panic("x86_get_sdbase");
695 	}
696 
697 	return copyout(&base, arg, sizeof(base));
698 #endif
699 }
700 
701 int
702 sys_sysarch(struct lwp *l, const struct sys_sysarch_args *uap,
703     register_t *retval)
704 {
705 	/* {
706 		syscallarg(int) op;
707 		syscallarg(void *) parms;
708 	} */
709 	int error = 0;
710 
711 	switch (SCARG(uap, op)) {
712 	case X86_IOPL:
713 		error = x86_iopl(l, SCARG(uap, parms), retval);
714 		break;
715 
716 #ifdef i386
717 	/*
718 	 * On amd64, this is done via netbsd32_sysarch.
719 	 */
720 	case X86_GET_LDT:
721 		error = x86_get_ldt(l, SCARG(uap, parms), retval);
722 		break;
723 
724 	case X86_SET_LDT:
725 		error = x86_set_ldt(l, SCARG(uap, parms), retval);
726 		break;
727 #endif
728 
729 	case X86_GET_IOPERM:
730 		error = x86_get_ioperm(l, SCARG(uap, parms), retval);
731 		break;
732 
733 	case X86_SET_IOPERM:
734 		error = x86_set_ioperm(l, SCARG(uap, parms), retval);
735 		break;
736 
737 	case X86_GET_MTRR:
738 		error = x86_get_mtrr(l, SCARG(uap, parms), retval);
739 		break;
740 	case X86_SET_MTRR:
741 		error = x86_set_mtrr(l, SCARG(uap, parms), retval);
742 		break;
743 
744 	case X86_SET_FSBASE:
745 		error = x86_set_sdbase(SCARG(uap, parms), 'f', curlwp, false);
746 		break;
747 
748 	case X86_SET_GSBASE:
749 		error = x86_set_sdbase(SCARG(uap, parms), 'g', curlwp, false);
750 		break;
751 
752 	case X86_GET_FSBASE:
753 		error = x86_get_sdbase(SCARG(uap, parms), 'f');
754 		break;
755 
756 	case X86_GET_GSBASE:
757 		error = x86_get_sdbase(SCARG(uap, parms), 'g');
758 		break;
759 
760 	default:
761 		error = EINVAL;
762 		break;
763 	}
764 	return error;
765 }
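
/*
 * Illustrative userland sketch (not part of this file): reading the
 * current %fs base through sysarch(2), assuming the standard
 * sysarch(int, void *) entry point declared in <machine/sysarch.h>:
 *
 *	void *base;
 *	if (sysarch(X86_GET_FSBASE, &base) == -1)
 *		err(EXIT_FAILURE, "sysarch");
 */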
766 
767 int
768 cpu_lwp_setprivate(lwp_t *l, void *addr)
769 {
770 
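	/*
	 * Set the lwp's TLS pointer: native 64-bit processes use %fs,
	 * i386 and 32-bit-on-amd64 processes use %gs.
	 */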
771 #ifdef __x86_64__
772 	if ((l->l_proc->p_flag & PK_32) == 0) {
773 		return x86_set_sdbase(addr, 'f', l, true);
774 	}
775 #endif
776 	return x86_set_sdbase(addr, 'g', l, true);
777 }
778