/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)sys_machdep.c	5.5 (Berkeley) 1/19/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysproto.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/proc.h>
#include <machine/sysarch.h>

#include <security/audit/audit.h>

#include <vm/vm_kern.h>		/* for kernel_map */

#define MAX_LD			8192
#define LD_PER_PAGE		512
#define NEW_MAX_LD(num)		rounddown2(num + LD_PER_PAGE, LD_PER_PAGE)
#define SIZE_FROM_LARGEST_LD(num) (NEW_MAX_LD(num) << 3)
#define NULL_LDT_BASE		((caddr_t)NULL)

#ifdef SMP
static void set_user_ldt_rv(struct vmspace *vmsp);
#endif
static int i386_set_ldt_data(struct thread *, int start, int num,
	union descriptor *descs);
static int i386_ldt_grow(struct thread *td, int len);

void
fill_based_sd(struct segment_descriptor *sdp, uint32_t base)
{

	sdp->sd_lobase = base & 0xffffff;
	sdp->sd_hibase = (base >> 24) & 0xff;
	sdp->sd_lolimit = 0xffff;	/* 4GB limit, wraps around */
	sdp->sd_hilimit = 0xf;
	sdp->sd_type = SDT_MEMRWA;
	sdp->sd_dpl = SEL_UPL;
	sdp->sd_p = 1;
	sdp->sd_xx = 0;
	sdp->sd_def32 = 1;
	sdp->sd_gran = 1;
}

#ifndef _SYS_SYSPROTO_H_
struct sysarch_args {
	int op;
	char *parms;
};
#endif
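
/*
 * sysarch() is the i386-specific system call multiplexer.  Each operation
 * copies in its argument structure as needed, applies capability-mode
 * policy, dispatches, and copies results back out to userland.
 */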
int
sysarch(struct thread *td, struct sysarch_args *uap)
{
	int error;
	union descriptor *lp;
	union {
		struct i386_ldt_args largs;
		struct i386_ioperm_args iargs;
		struct i386_get_xfpustate xfpu;
	} kargs;
	uint32_t base;
	struct segment_descriptor sd, *sdp;

	AUDIT_ARG_CMD(uap->op);

#ifdef CAPABILITY_MODE
	/*
	 * When adding new operations, add a new case statement here to
	 * explicitly indicate whether or not the operation is safe to
	 * perform in capability mode.
	 */
	if (IN_CAPABILITY_MODE(td)) {
		switch (uap->op) {
		case I386_GET_LDT:
		case I386_SET_LDT:
		case I386_GET_IOPERM:
		case I386_GET_FSBASE:
		case I386_SET_FSBASE:
		case I386_GET_GSBASE:
		case I386_SET_GSBASE:
		case I386_GET_XFPUSTATE:
			break;

		case I386_SET_IOPERM:
		default:
#ifdef KTRACE
			if (KTRPOINT(td, KTR_CAPFAIL))
				ktrcapfail(CAPFAIL_SYSCALL, NULL, NULL);
#endif
			return (ECAPMODE);
		}
	}
#endif

	switch (uap->op) {
	case I386_GET_IOPERM:
	case I386_SET_IOPERM:
		if ((error = copyin(uap->parms, &kargs.iargs,
		    sizeof(struct i386_ioperm_args))) != 0)
			return (error);
		break;
	case I386_GET_LDT:
	case I386_SET_LDT:
		if ((error = copyin(uap->parms, &kargs.largs,
		    sizeof(struct i386_ldt_args))) != 0)
			return (error);
		if (kargs.largs.num > MAX_LD || kargs.largs.num <= 0)
			return (EINVAL);
		break;
	case I386_GET_XFPUSTATE:
		if ((error = copyin(uap->parms, &kargs.xfpu,
		    sizeof(struct i386_get_xfpustate))) != 0)
			return (error);
		break;
	default:
		break;
	}

	switch (uap->op) {
	case I386_GET_LDT:
		error = i386_get_ldt(td, &kargs.largs);
		break;
	case I386_SET_LDT:
		if (kargs.largs.descs != NULL) {
			lp = (union descriptor *)malloc(
			    kargs.largs.num * sizeof(union descriptor),
			    M_TEMP, M_WAITOK);
			error = copyin(kargs.largs.descs, lp,
			    kargs.largs.num * sizeof(union descriptor));
			if (error == 0)
				error = i386_set_ldt(td, &kargs.largs, lp);
			free(lp, M_TEMP);
		} else {
			error = i386_set_ldt(td, &kargs.largs, NULL);
		}
		break;
	case I386_GET_IOPERM:
		error = i386_get_ioperm(td, &kargs.iargs);
		if (error == 0)
			error = copyout(&kargs.iargs, uap->parms,
			    sizeof(struct i386_ioperm_args));
		break;
	case I386_SET_IOPERM:
		error = i386_set_ioperm(td, &kargs.iargs);
		break;
	case I386_VM86:
		error = vm86_sysarch(td, uap->parms);
		break;
	case I386_GET_FSBASE:
		sdp = &td->td_pcb->pcb_fsd;
		base = sdp->sd_hibase << 24 | sdp->sd_lobase;
		error = copyout(&base, uap->parms, sizeof(base));
		break;
	case I386_SET_FSBASE:
		error = copyin(uap->parms, &base, sizeof(base));
		if (error == 0) {
			/*
			 * Construct a descriptor and store it in the pcb for
			 * the next context switch.  Also store it in the gdt
			 * so that the load of tf_fs into %fs will activate it
			 * at return to userland.
			 */
			fill_based_sd(&sd, base);
			critical_enter();
			td->td_pcb->pcb_fsd = sd;
			PCPU_GET(fsgs_gdt)[0] = sd;
			critical_exit();
			td->td_frame->tf_fs = GSEL(GUFS_SEL, SEL_UPL);
		}
		break;
	case I386_GET_GSBASE:
		sdp = &td->td_pcb->pcb_gsd;
		base = sdp->sd_hibase << 24 | sdp->sd_lobase;
		error = copyout(&base, uap->parms, sizeof(base));
		break;
	case I386_SET_GSBASE:
		error = copyin(uap->parms, &base, sizeof(base));
		if (error == 0) {
			/*
			 * Construct a descriptor and store it in the pcb for
			 * the next context switch.  Also store it in the gdt
			 * because we have to do a load_gs() right now.
			 */
			fill_based_sd(&sd, base);
			critical_enter();
			td->td_pcb->pcb_gsd = sd;
			PCPU_GET(fsgs_gdt)[1] = sd;
			critical_exit();
			load_gs(GSEL(GUGS_SEL, SEL_UPL));
		}
		break;
	case I386_GET_XFPUSTATE:
		if (kargs.xfpu.len > cpu_max_ext_state_size -
		    sizeof(union savefpu))
			return (EINVAL);
		npxgetregs(td);
		error = copyout((char *)(get_pcb_user_save_td(td) + 1),
		    kargs.xfpu.addr, kargs.xfpu.len);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
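
/*
 * Allocate a pcb extension holding a private TSS with an I/O permission
 * bitmap, and switch the current CPU to it.  The bitmap initially has
 * every bit set, i.e. all ports are denied.
 */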
int
i386_extend_pcb(struct thread *td)
{
	int i, offset;
	u_long *addr;
	struct pcb_ext *ext;
	struct soft_segment_descriptor ssd = {
		0,			/* segment base address (overwritten) */
		ctob(IOPAGES + 1) - 1,	/* length */
		SDT_SYS386TSS,		/* segment type */
		0,			/* priority level */
		1,			/* descriptor present */
		0, 0,
		0,			/* default 32 size */
		0			/* granularity */
	};

	ext = (struct pcb_ext *)kmem_malloc(kernel_arena, ctob(IOPAGES + 1),
	    M_WAITOK | M_ZERO);
	/* -16 is so we can convert a trapframe into vm86trapframe in place */
	ext->ext_tss.tss_esp0 = (vm_offset_t)td->td_pcb - 16;
	ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	/*
	 * The last byte of the i/o map must be followed by an 0xff byte.
	 * We arbitrarily allocate 16 bytes here, to keep the starting
	 * address on a doubleword boundary.
	 */
	offset = PAGE_SIZE - 16;
	ext->ext_tss.tss_ioopt =
	    (offset - ((unsigned)&ext->ext_tss - (unsigned)ext)) << 16;
	ext->ext_iomap = (caddr_t)ext + offset;
	ext->ext_vm86.vm86_intmap = (caddr_t)ext + offset - 32;

	addr = (u_long *)ext->ext_vm86.vm86_intmap;
	for (i = 0; i < (ctob(IOPAGES) + 32 + 16) / sizeof(u_long); i++)
		*addr++ = ~0;

	ssd.ssd_base = (unsigned)&ext->ext_tss;
	ssd.ssd_limit -= ((unsigned)&ext->ext_tss - (unsigned)ext);
	ssdtosd(&ssd, &ext->ext_tssd);

	KASSERT(td == curthread, ("giving TSS to !curthread"));
	KASSERT(td->td_pcb->pcb_ext == 0, ("already have a TSS!"));

	/* Switch to the new TSS. */
	critical_enter();
	td->td_pcb->pcb_ext = ext;
	PCPU_SET(private_tss, 1);
	*PCPU_GET(tss_gdt) = ext->ext_tssd;
	ltr(GSEL(GPROC0_SEL, SEL_KPL));
	critical_exit();

	return (0);
}

int
i386_set_ioperm(struct thread *td, struct i386_ioperm_args *uap)
{
	char *iomap;
	u_int i;
	int error;

	if ((error = priv_check(td, PRIV_IO)) != 0)
		return (error);
	if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
		return (error);
	/*
	 * XXX
	 * While this is restricted to root, we should probably figure out
	 * whether any other driver is using this i/o address, so as not to
	 * cause confusion.  This probably requires a global 'usage registry'.
	 */

	if (td->td_pcb->pcb_ext == 0)
		if ((error = i386_extend_pcb(td)) != 0)
			return (error);
	iomap = (char *)td->td_pcb->pcb_ext->ext_iomap;

	if (uap->start > uap->start + uap->length ||
	    uap->start + uap->length > IOPAGES * PAGE_SIZE * NBBY)
		return (EINVAL);

	for (i = uap->start; i < uap->start + uap->length; i++) {
		if (uap->enable)
			iomap[i >> 3] &= ~(1 << (i & 7));
		else
			iomap[i >> 3] |= (1 << (i & 7));
	}
	return (error);
}
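
/*
 * Report the I/O permission state at uap->start, and the length of the
 * contiguous run of ports that share that state.
 */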
int
i386_get_ioperm(struct thread *td, struct i386_ioperm_args *uap)
{
	int i, state;
	char *iomap;

	if (uap->start >= IOPAGES * PAGE_SIZE * NBBY)
		return (EINVAL);

	if (td->td_pcb->pcb_ext == 0) {
		uap->length = 0;
		goto done;
	}

	iomap = (char *)td->td_pcb->pcb_ext->ext_iomap;

	i = uap->start;
	state = (iomap[i >> 3] >> (i & 7)) & 1;
	uap->enable = !state;
	uap->length = 1;

	for (i = uap->start + 1; i < IOPAGES * PAGE_SIZE * NBBY; i++) {
		if (state != ((iomap[i >> 3] >> (i & 7)) & 1))
			break;
		uap->length++;
	}

done:
	return (0);
}

/*
 * Update the GDT entry pointing to the LDT to point to the LDT of the
 * current process.  Acquires and releases dt_lock internally if the
 * caller does not already hold it.
 */
void
set_user_ldt(struct mdproc *mdp)
{
	struct proc_ldt *pldt;
	int dtlocked;

	dtlocked = 0;
	if (!mtx_owned(&dt_lock)) {
		mtx_lock_spin(&dt_lock);
		dtlocked = 1;
	}

	pldt = mdp->md_ldt;
#ifdef SMP
	gdt[PCPU_GET(cpuid) * NGDT + GUSERLDT_SEL].sd = pldt->ldt_sd;
#else
	gdt[GUSERLDT_SEL].sd = pldt->ldt_sd;
#endif
	lldt(GSEL(GUSERLDT_SEL, SEL_KPL));
	PCPU_SET(currentldt, GSEL(GUSERLDT_SEL, SEL_KPL));
	if (dtlocked)
		mtx_unlock_spin(&dt_lock);
}

#ifdef SMP
static void
set_user_ldt_rv(struct vmspace *vmsp)
{
	struct thread *td;

	td = curthread;
	if (vmsp != td->td_proc->p_vmspace)
		return;

	set_user_ldt(&td->td_proc->p_md);
}
#endif

/*
 * dt_lock must be held.  Returns with dt_lock held.
 */
struct proc_ldt *
user_ldt_alloc(struct mdproc *mdp, int len)
{
	struct proc_ldt *pldt, *new_ldt;

	mtx_assert(&dt_lock, MA_OWNED);
	mtx_unlock_spin(&dt_lock);
	new_ldt = malloc(sizeof(struct proc_ldt),
	    M_SUBPROC, M_WAITOK);

	new_ldt->ldt_len = len = NEW_MAX_LD(len);
	new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_arena,
	    len * sizeof(union descriptor), M_WAITOK | M_ZERO);
	new_ldt->ldt_refcnt = 1;
	new_ldt->ldt_active = 0;

	mtx_lock_spin(&dt_lock);
	gdt_segs[GUSERLDT_SEL].ssd_base = (unsigned)new_ldt->ldt_base;
	gdt_segs[GUSERLDT_SEL].ssd_limit = len * sizeof(union descriptor) - 1;
	ssdtosd(&gdt_segs[GUSERLDT_SEL], &new_ldt->ldt_sd);

	if ((pldt = mdp->md_ldt) != NULL) {
		if (len > pldt->ldt_len)
			len = pldt->ldt_len;
		bcopy(pldt->ldt_base, new_ldt->ldt_base,
		    len * sizeof(union descriptor));
	} else
		bcopy(ldt, new_ldt->ldt_base, sizeof(ldt));

	return (new_ldt);
}

/*
 * Must be called with dt_lock held.  Returns with dt_lock released.
 */
void
user_ldt_free(struct thread *td)
{
	struct mdproc *mdp = &td->td_proc->p_md;
	struct proc_ldt *pldt;

	mtx_assert(&dt_lock, MA_OWNED);
	if ((pldt = mdp->md_ldt) == NULL) {
		mtx_unlock_spin(&dt_lock);
		return;
	}

	if (td == curthread) {
		lldt(_default_ldt);
		PCPU_SET(currentldt, _default_ldt);
	}

	mdp->md_ldt = NULL;
	user_ldt_deref(pldt);
}

void
user_ldt_deref(struct proc_ldt *pldt)
{

	mtx_assert(&dt_lock, MA_OWNED);
	if (--pldt->ldt_refcnt == 0) {
		mtx_unlock_spin(&dt_lock);
		kmem_free(kernel_arena, (vm_offset_t)pldt->ldt_base,
		    pldt->ldt_len * sizeof(union descriptor));
		free(pldt, M_SUBPROC);
	} else
		mtx_unlock_spin(&dt_lock);
}

/*
 * Note for the authors of compat layers (linux, etc): copyout() in
 * the function below is not a problem since it presents data in
 * arch-specific format (i.e. i386-specific in this case), not in
 * the OS-specific one.
 */
int
i386_get_ldt(struct thread *td, struct i386_ldt_args *uap)
{
	int error = 0;
	struct proc_ldt *pldt;
	int nldt, num;
	union descriptor *lp;

#ifdef DEBUG
	printf("i386_get_ldt: start=%d num=%d descs=%p\n",
	    uap->start, uap->num, (void *)uap->descs);
#endif

	mtx_lock_spin(&dt_lock);
	if ((pldt = td->td_proc->p_md.md_ldt) != NULL) {
		nldt = pldt->ldt_len;
		lp = &((union descriptor *)(pldt->ldt_base))[uap->start];
		mtx_unlock_spin(&dt_lock);
		num = min(uap->num, nldt);
	} else {
		mtx_unlock_spin(&dt_lock);
		nldt = sizeof(ldt) / sizeof(ldt[0]);
		num = min(uap->num, nldt);
		lp = &ldt[uap->start];
	}

	if ((uap->start > (unsigned int)nldt) ||
	    ((unsigned int)num > (unsigned int)nldt) ||
	    ((unsigned int)(uap->start + num) > (unsigned int)nldt))
		return (EINVAL);

	error = copyout(lp, uap->descs, num * sizeof(union descriptor));
	if (!error)
		td->td_retval[0] = num;

	return (error);
}
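
/*
 * Install or free user LDT descriptors.  A NULL descs pointer frees the
 * requested range (start == num == 0 frees every user slot); a start of
 * LDT_AUTO_ALLOC with num == 1 allocates the first free slot rather than
 * a caller-chosen one.
 */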
int
i386_set_ldt(struct thread *td, struct i386_ldt_args *uap,
    union descriptor *descs)
{
	int error, i;
	int largest_ld;
	struct mdproc *mdp = &td->td_proc->p_md;
	struct proc_ldt *pldt;
	union descriptor *dp;

#ifdef DEBUG
	printf("i386_set_ldt: start=%d num=%d descs=%p\n",
	    uap->start, uap->num, (void *)uap->descs);
#endif
	error = 0;

	if (descs == NULL) {
		/* Free descriptors */
		if (uap->start == 0 && uap->num == 0) {
			/*
			 * Treat this as a special case, so userland needn't
			 * know magic number NLDT.
			 */
			uap->start = NLDT;
			uap->num = MAX_LD - NLDT;
		}
		if (uap->num == 0)
			return (EINVAL);
		mtx_lock_spin(&dt_lock);
		if ((pldt = mdp->md_ldt) == NULL ||
		    uap->start >= pldt->ldt_len) {
			mtx_unlock_spin(&dt_lock);
			return (0);
		}
		largest_ld = uap->start + uap->num;
		if (largest_ld > pldt->ldt_len)
			largest_ld = pldt->ldt_len;
		for (i = uap->start; i < largest_ld; i++)
			atomic_store_rel_64(&((uint64_t *)(pldt->ldt_base))[i],
			    0);
		mtx_unlock_spin(&dt_lock);
		return (0);
	}

	if (!(uap->start == LDT_AUTO_ALLOC && uap->num == 1)) {
		/* verify range of descriptors to modify */
		largest_ld = uap->start + uap->num;
		if (uap->start >= MAX_LD || largest_ld > MAX_LD)
			return (EINVAL);
	}

	/* Check descriptors for access violations */
	for (i = 0; i < uap->num; i++) {
		dp = &descs[i];

		switch (dp->sd.sd_type) {
		case SDT_SYSNULL:	/* system null */
			dp->sd.sd_p = 0;
			break;
		case SDT_SYS286TSS:	/* system 286 TSS available */
		case SDT_SYSLDT:	/* system local descriptor table */
		case SDT_SYS286BSY:	/* system 286 TSS busy */
		case SDT_SYSTASKGT:	/* system task gate */
		case SDT_SYS286IGT:	/* system 286 interrupt gate */
		case SDT_SYS286TGT:	/* system 286 trap gate */
		case SDT_SYSNULL2:	/* undefined by Intel */
		case SDT_SYS386TSS:	/* system 386 TSS available */
		case SDT_SYSNULL3:	/* undefined by Intel */
		case SDT_SYS386BSY:	/* system 386 TSS busy */
		case SDT_SYSNULL4:	/* undefined by Intel */
		case SDT_SYS386IGT:	/* system 386 interrupt gate */
		case SDT_SYS386TGT:	/* system 386 trap gate */
		case SDT_SYS286CGT:	/* system 286 call gate */
		case SDT_SYS386CGT:	/* system 386 call gate */
			/*
			 * I can't think of any reason to allow a user proc
			 * to create a segment of these types.  They are
			 * for OS use only.
			 */
			return (EACCES);
			/*NOTREACHED*/

		/* memory segment types */
		case SDT_MEMEC:		/* memory execute only conforming */
		case SDT_MEMEAC:	/* memory execute only accessed conforming */
		case SDT_MEMERC:	/* memory execute read conforming */
		case SDT_MEMERAC:	/* memory execute read accessed conforming */
			/* Must be "present" if executable and conforming. */
			if (dp->sd.sd_p == 0)
				return (EACCES);
			break;
		case SDT_MEMRO:		/* memory read only */
		case SDT_MEMROA:	/* memory read only accessed */
		case SDT_MEMRW:		/* memory read write */
		case SDT_MEMRWA:	/* memory read write accessed */
		case SDT_MEMROD:	/* memory read only expand dwn limit */
		case SDT_MEMRODA:	/* memory read only expand dwn lim accessed */
		case SDT_MEMRWD:	/* memory read write expand dwn limit */
		case SDT_MEMRWDA:	/* memory read write expand dwn lim accessed */
		case SDT_MEME:		/* memory execute only */
		case SDT_MEMEA:		/* memory execute only accessed */
		case SDT_MEMER:		/* memory execute read */
		case SDT_MEMERA:	/* memory execute read accessed */
			break;
		default:
			return (EINVAL);
			/*NOTREACHED*/
		}

		/* Only user (ring-3) descriptors may be present. */
		if ((dp->sd.sd_p != 0) && (dp->sd.sd_dpl != SEL_UPL))
			return (EACCES);
	}
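
	/*
	 * The descriptors are sane; either allocate a free slot or install
	 * into the caller-specified range, growing the LDT as needed.
	 */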
	if (uap->start == LDT_AUTO_ALLOC && uap->num == 1) {
		/* Allocate a free slot */
		mtx_lock_spin(&dt_lock);
		if ((pldt = mdp->md_ldt) == NULL) {
			if ((error = i386_ldt_grow(td, NLDT + 1))) {
				mtx_unlock_spin(&dt_lock);
				return (error);
			}
			pldt = mdp->md_ldt;
		}
again:
		/*
		 * Start scanning a bit up to leave room for NVidia and
		 * Wine, which still use the "Blat" method of allocation.
		 */
		dp = &((union descriptor *)(pldt->ldt_base))[NLDT];
		for (i = NLDT; i < pldt->ldt_len; ++i) {
			if (dp->sd.sd_type == SDT_SYSNULL)
				break;
			dp++;
		}
		if (i >= pldt->ldt_len) {
			if ((error = i386_ldt_grow(td, pldt->ldt_len + 1))) {
				mtx_unlock_spin(&dt_lock);
				return (error);
			}
			goto again;
		}
		uap->start = i;
		error = i386_set_ldt_data(td, i, 1, descs);
		mtx_unlock_spin(&dt_lock);
	} else {
		largest_ld = uap->start + uap->num;
		mtx_lock_spin(&dt_lock);
		if (!(error = i386_ldt_grow(td, largest_ld))) {
			error = i386_set_ldt_data(td, uap->start, uap->num,
			    descs);
		}
		mtx_unlock_spin(&dt_lock);
	}
	if (error == 0)
		td->td_retval[0] = uap->start;
	return (error);
}

static int
i386_set_ldt_data(struct thread *td, int start, int num,
    union descriptor *descs)
{
	struct mdproc *mdp;
	struct proc_ldt *pldt;
	uint64_t *dst, *src;
	int i;

	mtx_assert(&dt_lock, MA_OWNED);

	mdp = &td->td_proc->p_md;
	pldt = mdp->md_ldt;
	dst = (uint64_t *)(pldt->ldt_base);
	src = (uint64_t *)descs;

	/*
	 * Atomic(9) is used only to get 64bit atomic store with
	 * cmpxchg8b when available.  There is no op without release
	 * semantic.
	 */
	for (i = 0; i < num; i++)
		atomic_store_rel_64(&dst[start + i], src[i]);
	return (0);
}
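
/*
 * Grow the process LDT to hold at least 'len' descriptors, allocating a
 * new table and copying the old contents if necessary.  Called with
 * dt_lock held; the lock is dropped around allocation and the cross-CPU
 * reload, so a racing grow may win, in which case the loser's table is
 * freed and the survivor is kept.
 */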
static int
i386_ldt_grow(struct thread *td, int len)
{
	struct mdproc *mdp = &td->td_proc->p_md;
	struct proc_ldt *new_ldt, *pldt;
	caddr_t old_ldt_base = NULL_LDT_BASE;
	int old_ldt_len = 0;

	mtx_assert(&dt_lock, MA_OWNED);

	if (len > MAX_LD)
		return (ENOMEM);
	if (len < NLDT + 1)
		len = NLDT + 1;

	/* Allocate a user ldt. */
	if ((pldt = mdp->md_ldt) == NULL || len > pldt->ldt_len) {
		new_ldt = user_ldt_alloc(mdp, len);
		if (new_ldt == NULL)
			return (ENOMEM);
		pldt = mdp->md_ldt;

		if (pldt != NULL) {
			if (new_ldt->ldt_len <= pldt->ldt_len) {
				/*
				 * We just lost the race for allocation, so
				 * free the new object and return.
				 */
				mtx_unlock_spin(&dt_lock);
				kmem_free(kernel_arena,
				    (vm_offset_t)new_ldt->ldt_base,
				    new_ldt->ldt_len * sizeof(union descriptor));
				free(new_ldt, M_SUBPROC);
				mtx_lock_spin(&dt_lock);
				return (0);
			}

			/*
			 * We have to substitute the current LDT entry for
			 * curproc with the new one since its size grew.
			 */
			old_ldt_base = pldt->ldt_base;
			old_ldt_len = pldt->ldt_len;
			pldt->ldt_sd = new_ldt->ldt_sd;
			pldt->ldt_base = new_ldt->ldt_base;
			pldt->ldt_len = new_ldt->ldt_len;
		} else
			mdp->md_ldt = pldt = new_ldt;
#ifdef SMP
		/*
		 * Signal other cpus to reload ldt.  We need to unlock dt_lock
		 * here because other CPUs will contend for it, since their
		 * curthreads won't hold the lock and will block when trying
		 * to acquire it.
		 */
		mtx_unlock_spin(&dt_lock);
		smp_rendezvous(NULL, (void (*)(void *))set_user_ldt_rv,
		    NULL, td->td_proc->p_vmspace);
#else
		set_user_ldt(&td->td_proc->p_md);
		mtx_unlock_spin(&dt_lock);
#endif
		if (old_ldt_base != NULL_LDT_BASE) {
			kmem_free(kernel_arena, (vm_offset_t)old_ldt_base,
			    old_ldt_len * sizeof(union descriptor));
			free(new_ldt, M_SUBPROC);
		}
		mtx_lock_spin(&dt_lock);
	}
	return (0);
}