/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysproto.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>		/* for kernel_map */
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/specialreg.h>
#include <machine/sysarch.h>
#include <machine/tss.h>
#include <machine/vmparam.h>

#include <security/audit/audit.h>

static void user_ldt_deref(struct proc_ldt *pldt);
static void user_ldt_derefl(struct proc_ldt *pldt);

#define	MAX_LD	8192

int max_ldt_segment = 512;
SYSCTL_INT(_machdep, OID_AUTO, max_ldt_segment, CTLFLAG_RDTUN,
    &max_ldt_segment, 0,
    "Maximum number of allowed LDT segments in a single address space");
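
/*
 * Clamp the machdep.max_ldt_segment loader tunable to a sane range
 * early in boot; CTLFLAG_RDTUN makes the value read-only afterwards.
 */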
static void
max_ldt_segment_init(void *arg __unused)
{

	if (max_ldt_segment <= 0)
		max_ldt_segment = 1;
	if (max_ldt_segment > MAX_LD)
		max_ldt_segment = MAX_LD;
}
SYSINIT(maxldt, SI_SUB_VM_CONF, SI_ORDER_ANY, max_ldt_segment_init, NULL);

#ifndef _SYS_SYSPROTO_H_
struct sysarch_args {
	int op;
	char *parms;
};
#endif
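
/*
 * sysarch(2) is the userland entry point for all of the operations
 * below; I386_GET_LDT and I386_SET_LDT are routed to sysarch_ldt()
 * directly below.  A minimal, hypothetical userland sketch of setting
 * the 64-bit %fs base through this interface:
 *
 *	#include <machine/sysarch.h>
 *
 *	static char tls[128];	/- hypothetical TLS block -/
 *
 *	int
 *	set_fsbase_example(void)
 *	{
 *		void *base = tls;
 *
 *		return (sysarch(AMD64_SET_FSBASE, &base));
 *	}
 *
 * The handler copies a uint64_t base from parms and rejects values at
 * or above VM_MAXUSER_ADDRESS with EINVAL.
 */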
int
sysarch_ldt(struct thread *td, struct sysarch_args *uap, int uap_space)
{
	struct i386_ldt_args *largs, la;
	struct user_segment_descriptor *lp;
	int error = 0;

	/*
	 * XXXKIB check that the BSM generation code knows to encode
	 * the op argument.
	 */
	AUDIT_ARG_CMD(uap->op);
	if (uap_space == UIO_USERSPACE) {
		error = copyin(uap->parms, &la, sizeof(struct i386_ldt_args));
		if (error != 0)
			return (error);
		largs = &la;
	} else
		largs = (struct i386_ldt_args *)uap->parms;

	switch (uap->op) {
	case I386_GET_LDT:
		error = amd64_get_ldt(td, largs);
		break;
	case I386_SET_LDT:
		if (largs->descs != NULL && largs->num > max_ldt_segment)
			return (EINVAL);
		set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
		if (largs->descs != NULL) {
			lp = malloc(largs->num * sizeof(struct
			    user_segment_descriptor), M_TEMP, M_WAITOK);
			error = copyin(largs->descs, lp, largs->num *
			    sizeof(struct user_segment_descriptor));
			if (error == 0)
				error = amd64_set_ldt(td, largs, lp);
			free(lp, M_TEMP);
		} else {
			error = amd64_set_ldt(td, largs, NULL);
		}
		break;
	}
	return (error);
}
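
/*
 * Rewrite the base address in the per-CPU GDT descriptor backing the
 * 32-bit %gs segment (update_gdt_fsbase() below does the same for
 * %fs).  Only meaningful for curthread, since the descriptor lives in
 * the current CPU's GDT.
 */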
void
update_gdt_gsbase(struct thread *td, uint32_t base)
{
	struct user_segment_descriptor *sd;

	if (td != curthread)
		return;
	set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
	critical_enter();
	sd = PCPU_GET(gs32p);
	sd->sd_lobase = base & 0xffffff;
	sd->sd_hibase = (base >> 24) & 0xff;
	critical_exit();
}

void
update_gdt_fsbase(struct thread *td, uint32_t base)
{
	struct user_segment_descriptor *sd;

	if (td != curthread)
		return;
	set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
	critical_enter();
	sd = PCPU_GET(fs32p);
	sd->sd_lobase = base & 0xffffff;
	sd->sd_hibase = (base >> 24) & 0xff;
	critical_exit();
}

int
sysarch(struct thread *td, struct sysarch_args *uap)
{
	struct pcb *pcb;
	struct vm_map *map;
	uint32_t i386base;
	uint64_t a64base;
	struct i386_ioperm_args iargs;
	struct i386_get_xfpustate i386xfpu;
	struct i386_set_pkru i386pkru;
	struct amd64_get_xfpustate a64xfpu;
	struct amd64_set_pkru a64pkru;
	int error;

#ifdef CAPABILITY_MODE
	/*
	 * When adding new operations, add a new case statement here to
	 * explicitly indicate whether or not the operation is safe to
	 * perform in capability mode.
	 */
	switch (uap->op) {
	case I386_GET_LDT:
	case I386_SET_LDT:
	case I386_GET_IOPERM:
	case I386_GET_FSBASE:
	case I386_SET_FSBASE:
	case I386_GET_GSBASE:
	case I386_SET_GSBASE:
	case I386_GET_XFPUSTATE:
	case I386_SET_PKRU:
	case I386_CLEAR_PKRU:
	case AMD64_GET_FSBASE:
	case AMD64_SET_FSBASE:
	case AMD64_GET_GSBASE:
	case AMD64_SET_GSBASE:
	case AMD64_GET_XFPUSTATE:
	case AMD64_SET_PKRU:
	case AMD64_CLEAR_PKRU:
		break;

	case I386_SET_IOPERM:
	default:
		if (CAP_TRACING(td))
			ktrcapfail(CAPFAIL_SYSCALL, &uap->op);
		if (IN_CAPABILITY_MODE(td))
			return (ECAPMODE);
		break;
	}
#endif

	if (uap->op == I386_GET_LDT || uap->op == I386_SET_LDT)
		return (sysarch_ldt(td, uap, UIO_USERSPACE));

	error = 0;
	pcb = td->td_pcb;

	/*
	 * XXXKIB check that the BSM generation code knows to encode
	 * the op argument.
	 */
	AUDIT_ARG_CMD(uap->op);
	switch (uap->op) {
	case I386_GET_IOPERM:
	case I386_SET_IOPERM:
		if ((error = copyin(uap->parms, &iargs,
		    sizeof(struct i386_ioperm_args))) != 0)
			return (error);
		break;
	case I386_GET_XFPUSTATE:
		if ((error = copyin(uap->parms, &i386xfpu,
		    sizeof(struct i386_get_xfpustate))) != 0)
			return (error);
		a64xfpu.addr = (void *)(uintptr_t)i386xfpu.addr;
		a64xfpu.len = i386xfpu.len;
		break;
	case I386_SET_PKRU:
	case I386_CLEAR_PKRU:
		if ((error = copyin(uap->parms, &i386pkru,
		    sizeof(struct i386_set_pkru))) != 0)
			return (error);
		a64pkru.addr = (void *)(uintptr_t)i386pkru.addr;
		a64pkru.len = i386pkru.len;
		a64pkru.keyidx = i386pkru.keyidx;
		a64pkru.flags = i386pkru.flags;
		break;
	case AMD64_GET_XFPUSTATE:
		if ((error = copyin(uap->parms, &a64xfpu,
		    sizeof(struct amd64_get_xfpustate))) != 0)
			return (error);
		break;
	case AMD64_SET_PKRU:
	case AMD64_CLEAR_PKRU:
		if ((error = copyin(uap->parms, &a64pkru,
		    sizeof(struct amd64_set_pkru))) != 0)
			return (error);
		break;
	default:
		break;
	}

	switch (uap->op) {
	case I386_GET_IOPERM:
		error = amd64_get_ioperm(td, &iargs);
		if (error == 0)
			error = copyout(&iargs, uap->parms,
			    sizeof(struct i386_ioperm_args));
		break;
	case I386_SET_IOPERM:
		error = amd64_set_ioperm(td, &iargs);
		break;
	case I386_GET_FSBASE:
		update_pcb_bases(pcb);
		i386base = pcb->pcb_fsbase;
		error = copyout(&i386base, uap->parms, sizeof(i386base));
		break;
	case I386_SET_FSBASE:
		error = copyin(uap->parms, &i386base, sizeof(i386base));
		if (!error) {
			set_pcb_flags(pcb, PCB_FULL_IRET);
			pcb->pcb_fsbase = i386base;
			td->td_frame->tf_fs = _ufssel;
			update_gdt_fsbase(td, i386base);
		}
		break;
	case I386_GET_GSBASE:
		update_pcb_bases(pcb);
		i386base = pcb->pcb_gsbase;
		error = copyout(&i386base, uap->parms, sizeof(i386base));
		break;
	case I386_SET_GSBASE:
		error = copyin(uap->parms, &i386base, sizeof(i386base));
		if (!error) {
			set_pcb_flags(pcb, PCB_FULL_IRET);
			pcb->pcb_gsbase = i386base;
			td->td_frame->tf_gs = _ugssel;
			update_gdt_gsbase(td, i386base);
		}
		break;
	case AMD64_GET_FSBASE:
		update_pcb_bases(pcb);
		error = copyout(&pcb->pcb_fsbase, uap->parms,
		    sizeof(pcb->pcb_fsbase));
		break;

	case AMD64_SET_FSBASE:
		error = copyin(uap->parms, &a64base, sizeof(a64base));
		if (!error) {
			if (a64base < VM_MAXUSER_ADDRESS) {
				set_pcb_flags(pcb, PCB_FULL_IRET);
				pcb->pcb_fsbase = a64base;
				td->td_frame->tf_fs = _ufssel;
			} else
				error = EINVAL;
		}
		break;

	case AMD64_GET_GSBASE:
		update_pcb_bases(pcb);
		error = copyout(&pcb->pcb_gsbase, uap->parms,
		    sizeof(pcb->pcb_gsbase));
		break;

	case AMD64_SET_GSBASE:
		error = copyin(uap->parms, &a64base, sizeof(a64base));
		if (!error) {
			if (a64base < VM_MAXUSER_ADDRESS) {
				set_pcb_flags(pcb, PCB_FULL_IRET);
				pcb->pcb_gsbase = a64base;
				td->td_frame->tf_gs = _ugssel;
			} else
				error = EINVAL;
		}
		break;

	case I386_GET_XFPUSTATE:
	case AMD64_GET_XFPUSTATE:
		if (a64xfpu.len > cpu_max_ext_state_size -
		    sizeof(struct savefpu))
			return (EINVAL);
		fpugetregs(td);
		error = copyout((char *)(get_pcb_user_save_td(td) + 1),
		    a64xfpu.addr, a64xfpu.len);
		break;

	case I386_SET_PKRU:
	case AMD64_SET_PKRU:
		/*
		 * Read-lock the map to synchronize with parallel
		 * pmap_vmspace_copy() on fork.
		 */
		map = &td->td_proc->p_vmspace->vm_map;
		vm_map_lock_read(map);
		error = pmap_pkru_set(PCPU_GET(curpmap),
		    (vm_offset_t)a64pkru.addr, (vm_offset_t)a64pkru.addr +
		    a64pkru.len, a64pkru.keyidx, a64pkru.flags);
		vm_map_unlock_read(map);
		break;

	case I386_CLEAR_PKRU:
	case AMD64_CLEAR_PKRU:
		if (a64pkru.flags != 0 || a64pkru.keyidx != 0) {
			error = EINVAL;
			break;
		}
		map = &td->td_proc->p_vmspace->vm_map;
		vm_map_lock_read(map);
		error = pmap_pkru_clear(PCPU_GET(curpmap),
		    (vm_offset_t)a64pkru.addr,
		    (vm_offset_t)a64pkru.addr + a64pkru.len);
		vm_map_unlock_read(map);
		break;

	default:
		error = EINVAL;
		break;
	}
	return (error);
}

int
amd64_set_ioperm(struct thread *td, struct i386_ioperm_args *uap)
{
	char *iomap;
	struct amd64tss *tssp;
	struct system_segment_descriptor *tss_sd;
	struct pcb *pcb;
	u_int i;
	int error;

	if ((error = priv_check(td, PRIV_IO)) != 0)
		return (error);
	if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
		return (error);
	if (uap->start > uap->start + uap->length ||
	    uap->start + uap->length > IOPAGES * PAGE_SIZE * NBBY)
		return (EINVAL);

	/*
	 * XXX
	 * While this is restricted to root, we should probably figure out
	 * whether any other driver is using this i/o address, so as not to
	 * cause confusion.  This probably requires a global 'usage registry'.
	 */
	pcb = td->td_pcb;
	if (pcb->pcb_tssp == NULL) {
		tssp = kmem_malloc(ctob(IOPAGES + 1), M_WAITOK);
		pmap_pti_add_kva((vm_offset_t)tssp, (vm_offset_t)tssp +
		    ctob(IOPAGES + 1), false);
		iomap = (char *)&tssp[1];
		memset(iomap, 0xff, IOPERM_BITMAP_SIZE);
		critical_enter();
		/* Takes care of tss_rsp0. */
		memcpy(tssp, PCPU_PTR(common_tss), sizeof(struct amd64tss));
		tssp->tss_iobase = sizeof(*tssp);
		pcb->pcb_tssp = tssp;
		tss_sd = PCPU_GET(tss);
		tss_sd->sd_lobase = (u_long)tssp & 0xffffff;
		tss_sd->sd_hibase = ((u_long)tssp >> 24) & 0xfffffffffful;
		tss_sd->sd_type = SDT_SYSTSS;
		ltr(GSEL(GPROC0_SEL, SEL_KPL));
		PCPU_SET(tssp, tssp);
		critical_exit();
	} else
		iomap = (char *)&pcb->pcb_tssp[1];
	for (i = uap->start; i < uap->start + uap->length; i++) {
		if (uap->enable)
			iomap[i >> 3] &= ~(1 << (i & 7));
		else
			iomap[i >> 3] |= (1 << (i & 7));
	}
	return (error);
}
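
/*
 * Report the I/O permission state of the range starting at uap->start:
 * uap->enable reflects whether the first port is accessible (a clear
 * bitmap bit) and uap->length counts the run of following ports in the
 * same state.
 */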
int
amd64_get_ioperm(struct thread *td, struct i386_ioperm_args *uap)
{
	int i, state;
	char *iomap;

	if (uap->start >= IOPAGES * PAGE_SIZE * NBBY)
		return (EINVAL);
	if (td->td_pcb->pcb_tssp == NULL) {
		uap->length = 0;
		goto done;
	}

	iomap = (char *)&td->td_pcb->pcb_tssp[1];

	i = uap->start;
	state = (iomap[i >> 3] >> (i & 7)) & 1;
	uap->enable = !state;
	uap->length = 1;

	for (i = uap->start + 1; i < IOPAGES * PAGE_SIZE * NBBY; i++) {
		if (state != ((iomap[i >> 3] >> (i & 7)) & 1))
			break;
		uap->length++;
	}

done:
	return (0);
}

/*
 * Update the GDT entry pointing to the LDT to point to the LDT of the
 * current process.
 */
static void
set_user_ldt(struct mdproc *mdp)
{

	*PCPU_GET(ldt) = mdp->md_ldt_sd;
	lldt(GSEL(GUSERLDT_SEL, SEL_KPL));
}
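
/*
 * smp_rendezvous() callback for user_ldt_alloc(): if the process
 * running on this CPU still uses the LDT just installed for the
 * originating process, reload the LDT register to pick up the new
 * descriptor.
 */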
static void
set_user_ldt_rv(void *arg)
{
	struct proc *orig, *target;
	struct proc_ldt *ldt;

	orig = arg;
	target = curthread->td_proc;

	ldt = (void *)atomic_load_acq_ptr((uintptr_t *)&orig->p_md.md_ldt);
	if (target->p_md.md_ldt != ldt)
		return;

	set_user_ldt(&target->p_md);
}
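
/*
 * Return the process LDT, allocating a fresh one if none exists (or
 * unconditionally when force is set).  Called with dt_lock held; the
 * lock is dropped around the allocations and re-taken, so the caller
 * must be prepared for the LDT to have changed underneath it.
 */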
struct proc_ldt *
user_ldt_alloc(struct proc *p, int force)
{
	struct proc_ldt *pldt, *new_ldt;
	struct mdproc *mdp;
	struct soft_segment_descriptor sldt;
	vm_offset_t sva;
	vm_size_t sz;

	mtx_assert(&dt_lock, MA_OWNED);
	mdp = &p->p_md;
	if (!force && mdp->md_ldt != NULL)
		return (mdp->md_ldt);
	mtx_unlock(&dt_lock);
	new_ldt = malloc(sizeof(struct proc_ldt), M_SUBPROC, M_WAITOK);
	sz = max_ldt_segment * sizeof(struct user_segment_descriptor);
	new_ldt->ldt_base = kmem_malloc(sz, M_WAITOK | M_ZERO);
	sva = (uintptr_t)new_ldt->ldt_base;
	pmap_pti_add_kva(sva, sva + sz, false);
	new_ldt->ldt_refcnt = 1;
	sldt.ssd_base = sva;
	sldt.ssd_limit = sz - 1;
	sldt.ssd_type = SDT_SYSLDT;
	sldt.ssd_dpl = SEL_KPL;
	sldt.ssd_p = 1;
	sldt.ssd_long = 0;
	sldt.ssd_def32 = 0;
	sldt.ssd_gran = 0;
	mtx_lock(&dt_lock);
	pldt = mdp->md_ldt;
	if (pldt != NULL && !force) {
		pmap_pti_remove_kva(sva, sva + sz);
		kmem_free(new_ldt->ldt_base, sz);
		free(new_ldt, M_SUBPROC);
		return (pldt);
	}

	if (pldt != NULL) {
		bcopy(pldt->ldt_base, new_ldt->ldt_base, max_ldt_segment *
		    sizeof(struct user_segment_descriptor));
		user_ldt_derefl(pldt);
	}
	critical_enter();
	ssdtosyssd(&sldt, &p->p_md.md_ldt_sd);
	atomic_thread_fence_rel();
	mdp->md_ldt = new_ldt;
	critical_exit();
	smp_rendezvous(NULL, set_user_ldt_rv, NULL, p);

	return (mdp->md_ldt);
}
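
/*
 * Detach the LDT from the process and drop its reference.  If td is
 * the current thread, also load the null LDT selector so the stale
 * descriptor can no longer be reached.
 */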
void
user_ldt_free(struct thread *td)
{
	struct proc *p = td->td_proc;
	struct mdproc *mdp = &p->p_md;
	struct proc_ldt *pldt;

	mtx_lock(&dt_lock);
	if ((pldt = mdp->md_ldt) == NULL) {
		mtx_unlock(&dt_lock);
		return;
	}

	critical_enter();
	mdp->md_ldt = NULL;
	atomic_thread_fence_rel();
	bzero(&mdp->md_ldt_sd, sizeof(mdp->md_ldt_sd));
	if (td == curthread)
		lldt(GSEL(GNULL_SEL, SEL_KPL));
	critical_exit();
	user_ldt_deref(pldt);
}
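
/*
 * Drop one reference on pldt, freeing the descriptor memory once the
 * count reaches zero.  The "l" variant expects dt_lock to be held by
 * the caller; user_ldt_deref() asserts and then releases the lock.
 */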
static void
user_ldt_derefl(struct proc_ldt *pldt)
{
	vm_offset_t sva;
	vm_size_t sz;

	if (--pldt->ldt_refcnt == 0) {
		sva = (vm_offset_t)pldt->ldt_base;
		sz = max_ldt_segment * sizeof(struct user_segment_descriptor);
		pmap_pti_remove_kva(sva, sva + sz);
		kmem_free(pldt->ldt_base, sz);
		free(pldt, M_SUBPROC);
	}
}

static void
user_ldt_deref(struct proc_ldt *pldt)
{

	mtx_assert(&dt_lock, MA_OWNED);
	user_ldt_derefl(pldt);
	mtx_unlock(&dt_lock);
}

/*
 * Note for the authors of compat layers (linux, etc): copyout() in
 * the function below is not a problem since it presents data in
 * arch-specific format (i.e. i386-specific in this case), not in
 * the OS-specific one.
 */
int
amd64_get_ldt(struct thread *td, struct i386_ldt_args *uap)
{
	struct proc_ldt *pldt;
	struct user_segment_descriptor *lp;
	uint64_t *data;
	u_int i, num;
	int error;

#ifdef DEBUG
	printf("amd64_get_ldt: start=%u num=%u descs=%p\n",
	    uap->start, uap->num, (void *)uap->descs);
#endif

	pldt = td->td_proc->p_md.md_ldt;
	if (pldt == NULL || uap->start >= max_ldt_segment || uap->num == 0) {
		td->td_retval[0] = 0;
		return (0);
	}
	num = min(uap->num, max_ldt_segment - uap->start);
	lp = &((struct user_segment_descriptor *)(pldt->ldt_base))[uap->start];
	data = malloc(num * sizeof(struct user_segment_descriptor), M_TEMP,
	    M_WAITOK);
	mtx_lock(&dt_lock);
	for (i = 0; i < num; i++)
		data[i] = ((volatile uint64_t *)lp)[i];
	mtx_unlock(&dt_lock);
	error = copyout(data, uap->descs, num *
	    sizeof(struct user_segment_descriptor));
	free(data, M_TEMP);
	if (error == 0)
		td->td_retval[0] = num;
	return (error);
}
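
/*
 * Install user LDT descriptors.  A NULL descs frees the given range
 * (start == num == 0 frees the whole LDT); start == LDT_AUTO_ALLOC
 * with num == 1 allocates the first free slot.  Descriptors are
 * vetted first: system segment types are rejected, and only ring-3
 * descriptors may be marked present.
 */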
int
amd64_set_ldt(struct thread *td, struct i386_ldt_args *uap,
    struct user_segment_descriptor *descs)
{
	struct mdproc *mdp;
	struct proc_ldt *pldt;
	struct user_segment_descriptor *dp;
	struct proc *p;
	u_int largest_ld, i;
	int error;

#ifdef DEBUG
	printf("amd64_set_ldt: start=%u num=%u descs=%p\n",
	    uap->start, uap->num, (void *)uap->descs);
#endif
	mdp = &td->td_proc->p_md;
	error = 0;

	set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
	p = td->td_proc;
	if (descs == NULL) {
		/* Free descriptors. */
		if (uap->start == 0 && uap->num == 0)
			uap->num = max_ldt_segment;
		if (uap->num == 0)
			return (EINVAL);
		if ((pldt = mdp->md_ldt) == NULL ||
		    uap->start >= max_ldt_segment)
			return (0);
		largest_ld = uap->start + uap->num;
		if (largest_ld > max_ldt_segment)
			largest_ld = max_ldt_segment;
		if (largest_ld < uap->start)
			return (EINVAL);
		mtx_lock(&dt_lock);
		for (i = uap->start; i < largest_ld; i++)
			((volatile uint64_t *)(pldt->ldt_base))[i] = 0;
		mtx_unlock(&dt_lock);
		return (0);
	}

	if (!(uap->start == LDT_AUTO_ALLOC && uap->num == 1)) {
		/* Verify the range of descriptors to modify. */
		largest_ld = uap->start + uap->num;
		if (uap->start >= max_ldt_segment ||
		    largest_ld > max_ldt_segment ||
		    largest_ld < uap->start)
			return (EINVAL);
	}

	/* Check descriptors for access violations. */
	for (i = 0; i < uap->num; i++) {
		dp = &descs[i];

		switch (dp->sd_type) {
		case SDT_SYSNULL:	/* system null */
			dp->sd_p = 0;
			break;
		case SDT_SYS286TSS:
		case SDT_SYSLDT:
		case SDT_SYS286BSY:
		case SDT_SYS286CGT:
		case SDT_SYSTASKGT:
		case SDT_SYS286IGT:
		case SDT_SYS286TGT:
		case SDT_SYSNULL2:
		case SDT_SYSTSS:
		case SDT_SYSNULL3:
		case SDT_SYSBSY:
		case SDT_SYSCGT:
		case SDT_SYSNULL4:
		case SDT_SYSIGT:
		case SDT_SYSTGT:
			return (EACCES);

		/* memory segment types */
		case SDT_MEMEC:		/* memory execute only conforming */
		case SDT_MEMEAC:	/* memory execute only accessed conforming */
		case SDT_MEMERC:	/* memory execute read conforming */
		case SDT_MEMERAC:	/* memory execute read accessed conforming */
			/* Must be "present" if executable and conforming. */
			if (dp->sd_p == 0)
				return (EACCES);
			break;
		case SDT_MEMRO:		/* memory read only */
		case SDT_MEMROA:	/* memory read only accessed */
		case SDT_MEMRW:		/* memory read write */
		case SDT_MEMRWA:	/* memory read write accessed */
		case SDT_MEMROD:	/* memory read only expand dwn limit */
		case SDT_MEMRODA:	/* memory read only expand dwn lim accessed */
		case SDT_MEMRWD:	/* memory read write expand dwn limit */
		case SDT_MEMRWDA:	/* memory read write expand dwn lim accessed */
		case SDT_MEME:		/* memory execute only */
		case SDT_MEMEA:		/* memory execute only accessed */
		case SDT_MEMER:		/* memory execute read */
		case SDT_MEMERA:	/* memory execute read accessed */
			break;
		default:
			return (EINVAL);
		}

		/* Only user (ring-3) descriptors may be present. */
		if ((dp->sd_p != 0) && (dp->sd_dpl != SEL_UPL))
			return (EACCES);
	}

	if (uap->start == LDT_AUTO_ALLOC && uap->num == 1) {
		/* Allocate a free slot. */
		mtx_lock(&dt_lock);
		pldt = user_ldt_alloc(p, 0);
		if (pldt == NULL) {
			mtx_unlock(&dt_lock);
			return (ENOMEM);
		}

		/*
		 * Start scanning a bit up to leave room for NVidia and
		 * Wine, which still use the "Blat" method of allocation.
		 */
		i = 16;
		dp = &((struct user_segment_descriptor *)(pldt->ldt_base))[i];
		for (; i < max_ldt_segment; ++i, ++dp) {
			if (dp->sd_type == SDT_SYSNULL)
				break;
		}
		if (i >= max_ldt_segment) {
			mtx_unlock(&dt_lock);
			return (ENOSPC);
		}
		uap->start = i;
		error = amd64_set_ldt_data(td, i, 1, descs);
		mtx_unlock(&dt_lock);
	} else {
		largest_ld = uap->start + uap->num;
		if (largest_ld > max_ldt_segment)
			return (EINVAL);
		mtx_lock(&dt_lock);
		if (user_ldt_alloc(p, 0) != NULL) {
			error = amd64_set_ldt_data(td, uap->start, uap->num,
			    descs);
		}
		mtx_unlock(&dt_lock);
	}
	if (error == 0)
		td->td_retval[0] = uap->start;
	return (error);
}
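
/*
 * Copy the validated descriptors into the process LDT with dt_lock
 * held.  Each descriptor is written as a single 64-bit store so that
 * a concurrently executing thread never sees a torn entry.
 */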
int
amd64_set_ldt_data(struct thread *td, int start, int num,
    struct user_segment_descriptor *descs)
{
	struct mdproc *mdp;
	struct proc_ldt *pldt;
	volatile uint64_t *dst, *src;
	int i;

	mtx_assert(&dt_lock, MA_OWNED);

	mdp = &td->td_proc->p_md;
	pldt = mdp->md_ldt;
	dst = (volatile uint64_t *)(pldt->ldt_base);
	src = (volatile uint64_t *)descs;
	for (i = 0; i < num; i++)
		dst[start + i] = src[i];
	return (0);
}