/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)sys_machdep.c	5.5 (Berkeley) 1/19/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_kstack_pages.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/systm.h>
#include <sys/ktrace.h>		/* for KTRPOINT() and ktrcapfail() below */
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysproto.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/proc.h>
#include <machine/sysarch.h>

#include <security/audit/audit.h>

#include <vm/vm_kern.h>		/* for kernel_map */

#define	MAX_LD			8192
#define	LD_PER_PAGE		512
#define	NEW_MAX_LD(num)		rounddown2((num) + LD_PER_PAGE, LD_PER_PAGE)
#define	SIZE_FROM_LARGEST_LD(num) (NEW_MAX_LD(num) << 3)
#define	NULL_LDT_BASE		((caddr_t)NULL)

#ifdef SMP
static void set_user_ldt_rv(void *arg);
#endif
static int i386_set_ldt_data(struct thread *, int start, int num,
    union descriptor *descs);
static int i386_ldt_grow(struct thread *td, int len);

void
fill_based_sd(struct segment_descriptor *sdp, uint32_t base)
{

	sdp->sd_lobase = base & 0xffffff;
	sdp->sd_hibase = (base >> 24) & 0xff;
	sdp->sd_lolimit = 0xffff;	/* 4GB limit, wraps around */
	sdp->sd_hilimit = 0xf;
	sdp->sd_type = SDT_MEMRWA;
	sdp->sd_dpl = SEL_UPL;
	sdp->sd_p = 1;
	sdp->sd_xx = 0;
	sdp->sd_def32 = 1;
	sdp->sd_gran = 1;
}
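/*
 * Worked example (illustrative, not part of the interface):
 * fill_based_sd(&sd, 0xdeadbeef) yields sd_lobase = 0xadbeef and
 * sd_hibase = 0xde.  The limit fields together encode 0xfffff in 4KB
 * units (sd_gran = 1), i.e. a 4GB segment, so any 32-bit offset added
 * to the base simply wraps around modulo 2^32.
 */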
/*
 * Construct special descriptors for "base" selectors.  Store them in
 * the PCB for later use by cpu_switch().  Store them in the GDT for
 * more immediate use.  The GDT entries are part of the current
 * context.  Callers must load related segment registers to complete
 * setting up the current context.
 */
void
set_fsbase(struct thread *td, uint32_t base)
{
	struct segment_descriptor sd;

	fill_based_sd(&sd, base);
	critical_enter();
	td->td_pcb->pcb_fsd = sd;
	PCPU_GET(fsgs_gdt)[0] = sd;
	critical_exit();
}

void
set_gsbase(struct thread *td, uint32_t base)
{
	struct segment_descriptor sd;

	fill_based_sd(&sd, base);
	critical_enter();
	td->td_pcb->pcb_gsd = sd;
	PCPU_GET(fsgs_gdt)[1] = sd;
	critical_exit();
}

#ifndef _SYS_SYSPROTO_H_
struct sysarch_args {
	int op;
	char *parms;
};
#endif

int
sysarch(struct thread *td, struct sysarch_args *uap)
{
	int error;
	union descriptor *lp;
	union {
		struct i386_ldt_args largs;
		struct i386_ioperm_args iargs;
		struct i386_get_xfpustate xfpu;
	} kargs;
	uint32_t base;
	struct segment_descriptor *sdp;

	AUDIT_ARG_CMD(uap->op);

#ifdef CAPABILITY_MODE
	/*
	 * When adding new operations, add a new case statement here to
	 * explicitly indicate whether or not the operation is safe to
	 * perform in capability mode.
	 */
	if (IN_CAPABILITY_MODE(td)) {
		switch (uap->op) {
		case I386_GET_LDT:
		case I386_SET_LDT:
		case I386_GET_IOPERM:
		case I386_GET_FSBASE:
		case I386_SET_FSBASE:
		case I386_GET_GSBASE:
		case I386_SET_GSBASE:
		case I386_GET_XFPUSTATE:
			break;

		case I386_SET_IOPERM:
		default:
#ifdef KTRACE
			if (KTRPOINT(td, KTR_CAPFAIL))
				ktrcapfail(CAPFAIL_SYSCALL, NULL, NULL);
#endif
			return (ECAPMODE);
		}
	}
#endif

	switch (uap->op) {
	case I386_GET_IOPERM:
	case I386_SET_IOPERM:
		if ((error = copyin(uap->parms, &kargs.iargs,
		    sizeof(struct i386_ioperm_args))) != 0)
			return (error);
		break;
	case I386_GET_LDT:
	case I386_SET_LDT:
		if ((error = copyin(uap->parms, &kargs.largs,
		    sizeof(struct i386_ldt_args))) != 0)
			return (error);
		break;
	case I386_GET_XFPUSTATE:
		if ((error = copyin(uap->parms, &kargs.xfpu,
		    sizeof(struct i386_get_xfpustate))) != 0)
			return (error);
		break;
	default:
		break;
	}

	switch (uap->op) {
	case I386_GET_LDT:
		error = i386_get_ldt(td, &kargs.largs);
		break;
	case I386_SET_LDT:
		if (kargs.largs.descs != NULL) {
			if (kargs.largs.num > MAX_LD)
				return (EINVAL);
			lp = malloc(kargs.largs.num * sizeof(union descriptor),
			    M_TEMP, M_WAITOK);
			error = copyin(kargs.largs.descs, lp,
			    kargs.largs.num * sizeof(union descriptor));
			if (error == 0)
				error = i386_set_ldt(td, &kargs.largs, lp);
			free(lp, M_TEMP);
		} else {
			error = i386_set_ldt(td, &kargs.largs, NULL);
		}
		break;
	case I386_GET_IOPERM:
		error = i386_get_ioperm(td, &kargs.iargs);
		if (error == 0)
			error = copyout(&kargs.iargs, uap->parms,
			    sizeof(struct i386_ioperm_args));
		break;
	case I386_SET_IOPERM:
		error = i386_set_ioperm(td, &kargs.iargs);
		break;
	case I386_VM86:
		error = vm86_sysarch(td, uap->parms);
		break;
	case I386_GET_FSBASE:
		sdp = &td->td_pcb->pcb_fsd;
		base = sdp->sd_hibase << 24 | sdp->sd_lobase;
		error = copyout(&base, uap->parms, sizeof(base));
		break;
	case I386_SET_FSBASE:
		error = copyin(uap->parms, &base, sizeof(base));
		if (error == 0) {
			/*
			 * Construct the special descriptor for fsbase
			 * and arrange for doreti to load its selector
			 * soon enough.
			 */
			set_fsbase(td, base);
			td->td_frame->tf_fs = GSEL(GUFS_SEL, SEL_UPL);
		}
		break;
	case I386_GET_GSBASE:
		sdp = &td->td_pcb->pcb_gsd;
		base = sdp->sd_hibase << 24 | sdp->sd_lobase;
		error = copyout(&base, uap->parms, sizeof(base));
		break;
	case I386_SET_GSBASE:
		error = copyin(uap->parms, &base, sizeof(base));
		if (error == 0) {
			/*
			 * Construct the special descriptor for gsbase.
			 * The selector is loaded immediately, since we
			 * normally only reload %gs on context switches.
			 */
			set_gsbase(td, base);
			load_gs(GSEL(GUGS_SEL, SEL_UPL));
		}
		break;
	case I386_GET_XFPUSTATE:
		if (kargs.xfpu.len > cpu_max_ext_state_size -
		    sizeof(union savefpu))
			return (EINVAL);
		npxgetregs(td);
		error = copyout((char *)(get_pcb_user_save_td(td) + 1),
		    kargs.xfpu.addr, kargs.xfpu.len);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
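/*
 * Illustrative userland sketch (the wrapper name set_tls_base() is
 * invented for the example; sysarch(2) and I386_SET_GSBASE come from
 * <machine/sysarch.h>).  A threading library would point the %gs base
 * at a thread's control block like this:
 *
 *	#include <stdint.h>
 *	#include <machine/sysarch.h>
 *
 *	static int
 *	set_tls_base(void *tcb)
 *	{
 *		uint32_t base = (uint32_t)tcb;
 *
 *		return (sysarch(I386_SET_GSBASE, &base));
 *	}
 */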
int
i386_extend_pcb(struct thread *td)
{
	int i, offset;
	u_long *addr;
	struct pcb_ext *ext;
	struct soft_segment_descriptor ssd = {
		0,			/* segment base address (overwritten) */
		ctob(IOPAGES + 1) - 1,	/* length */
		SDT_SYS386TSS,		/* segment type */
		0,			/* priority level */
		1,			/* descriptor present */
		0, 0,
		0,			/* default 32 size */
		0			/* granularity */
	};

	ext = (struct pcb_ext *)kmem_malloc(kernel_arena, ctob(IOPAGES + 1),
	    M_WAITOK | M_ZERO);
	/* -16 is so we can convert a trapframe into vm86trapframe in place */
	ext->ext_tss.tss_esp0 = (vm_offset_t)td->td_pcb - 16;
	ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	/*
	 * The last byte of the i/o map must be followed by an 0xff byte.
	 * We arbitrarily allocate 16 bytes here, to keep the starting
	 * address on a doubleword boundary.
	 */
	offset = PAGE_SIZE - 16;
	ext->ext_tss.tss_ioopt =
	    (offset - ((unsigned)&ext->ext_tss - (unsigned)ext)) << 16;
	ext->ext_iomap = (caddr_t)ext + offset;
	ext->ext_vm86.vm86_intmap = (caddr_t)ext + offset - 32;

	addr = (u_long *)ext->ext_vm86.vm86_intmap;
	for (i = 0; i < (ctob(IOPAGES) + 32 + 16) / sizeof(u_long); i++)
		*addr++ = ~0;

	ssd.ssd_base = (unsigned)&ext->ext_tss;
	ssd.ssd_limit -= ((unsigned)&ext->ext_tss - (unsigned)ext);
	ssdtosd(&ssd, &ext->ext_tssd);

	KASSERT(td == curthread, ("giving TSS to !curthread"));
	KASSERT(td->td_pcb->pcb_ext == 0, ("already have a TSS!"));

	/* Switch to the new TSS. */
	critical_enter();
	td->td_pcb->pcb_ext = ext;
	PCPU_SET(private_tss, 1);
	*PCPU_GET(tss_gdt) = ext->ext_tssd;
	ltr(GSEL(GPROC0_SEL, SEL_KPL));
	critical_exit();

	return (0);
}
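/*
 * Illustrative layout note (assuming the usual i386 values IOPAGES = 2
 * and PAGE_SIZE = 4096): the allocation above spans three pages.  The
 * pcb_ext header with the TSS sits at the start of the first page, the
 * 32-byte vm86 interrupt redirection bitmap ends at offset
 * PAGE_SIZE - 16, the 8192-byte i/o permission bitmap fills the rest,
 * and the trailing 16 bytes of ~0 supply the terminating 0xff byte the
 * CPU requires after the i/o map.
 */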
int
i386_set_ioperm(struct thread *td, struct i386_ioperm_args *uap)
{
	char *iomap;
	u_int i;
	int error;

	if ((error = priv_check(td, PRIV_IO)) != 0)
		return (error);
	if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
		return (error);
	/*
	 * XXX
	 * While this is restricted to root, we should probably figure out
	 * whether any other driver is using this i/o address, so as not to
	 * cause confusion.  This probably requires a global 'usage registry'.
	 */

	if (td->td_pcb->pcb_ext == 0)
		if ((error = i386_extend_pcb(td)) != 0)
			return (error);
	iomap = (char *)td->td_pcb->pcb_ext->ext_iomap;

	if (uap->start > uap->start + uap->length ||
	    uap->start + uap->length > IOPAGES * PAGE_SIZE * NBBY)
		return (EINVAL);

	for (i = uap->start; i < uap->start + uap->length; i++) {
		if (uap->enable)
			iomap[i >> 3] &= ~(1 << (i & 7));
		else
			iomap[i >> 3] |= (1 << (i & 7));
	}
	return (error);
}

int
i386_get_ioperm(struct thread *td, struct i386_ioperm_args *uap)
{
	int i, state;
	char *iomap;

	if (uap->start >= IOPAGES * PAGE_SIZE * NBBY)
		return (EINVAL);

	if (td->td_pcb->pcb_ext == 0) {
		uap->length = 0;
		goto done;
	}

	iomap = (char *)td->td_pcb->pcb_ext->ext_iomap;

	i = uap->start;
	state = (iomap[i >> 3] >> (i & 7)) & 1;
	uap->enable = !state;
	uap->length = 1;

	for (i = uap->start + 1; i < IOPAGES * PAGE_SIZE * NBBY; i++) {
		if (state != ((iomap[i >> 3] >> (i & 7)) & 1))
			break;
		uap->length++;
	}

done:
	return (0);
}
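/*
 * Illustrative userland sketch (using the libc wrapper
 * i386_set_ioperm(3) declared in <machine/sysarch.h>; the port range
 * is just an example).  A cleared bitmap bit grants access, so enable
 * is 1 to allow the ports.  Granting a root process direct access to
 * the eight i/o ports of the first serial UART:
 *
 *	#include <err.h>
 *	#include <machine/sysarch.h>
 *
 *	if (i386_set_ioperm(0x3f8, 8, 1) != 0)
 *		err(1, "i386_set_ioperm");
 */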
/*
 * Update the GDT entry pointing to the LDT to point to the LDT of the
 * current process.  The caller must hold dt_lock; the set_user_ldt()
 * wrapper below acquires and releases it for callers that do not.
 */
static void
set_user_ldt_locked(struct mdproc *mdp)
{
	struct proc_ldt *pldt;
	int gdt_idx;

	mtx_assert(&dt_lock, MA_OWNED);

	pldt = mdp->md_ldt;
	gdt_idx = GUSERLDT_SEL;
	gdt_idx += PCPU_GET(cpuid) * NGDT;	/* always 0 on UP */
	gdt[gdt_idx].sd = pldt->ldt_sd;
	lldt(GSEL(GUSERLDT_SEL, SEL_KPL));
	PCPU_SET(currentldt, GSEL(GUSERLDT_SEL, SEL_KPL));
}

void
set_user_ldt(struct mdproc *mdp)
{

	mtx_lock_spin(&dt_lock);
	set_user_ldt_locked(mdp);
	mtx_unlock_spin(&dt_lock);
}

#ifdef SMP
static void
set_user_ldt_rv(void *arg)
{
	struct proc *p;

	p = curproc;
	if (arg == p->p_vmspace)
		set_user_ldt(&p->p_md);
}
#endif

/*
 * dt_lock must be held.  It is dropped temporarily so that the
 * allocations below may sleep; callers must re-check any state that
 * may have changed meanwhile.  Returns with dt_lock held.
 */
struct proc_ldt *
user_ldt_alloc(struct mdproc *mdp, int len)
{
	struct proc_ldt *pldt, *new_ldt;

	mtx_assert(&dt_lock, MA_OWNED);
	mtx_unlock_spin(&dt_lock);
	new_ldt = malloc(sizeof(struct proc_ldt), M_SUBPROC, M_WAITOK);

	new_ldt->ldt_len = len = NEW_MAX_LD(len);
	new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_arena,
	    len * sizeof(union descriptor), M_WAITOK | M_ZERO);
	new_ldt->ldt_refcnt = 1;
	new_ldt->ldt_active = 0;

	mtx_lock_spin(&dt_lock);
	gdt_segs[GUSERLDT_SEL].ssd_base = (unsigned)new_ldt->ldt_base;
	gdt_segs[GUSERLDT_SEL].ssd_limit = len * sizeof(union descriptor) - 1;
	ssdtosd(&gdt_segs[GUSERLDT_SEL], &new_ldt->ldt_sd);

	if ((pldt = mdp->md_ldt) != NULL) {
		if (len > pldt->ldt_len)
			len = pldt->ldt_len;
		bcopy(pldt->ldt_base, new_ldt->ldt_base,
		    len * sizeof(union descriptor));
	} else
		bcopy(ldt, new_ldt->ldt_base, sizeof(ldt));

	return (new_ldt);
}

/*
 * Must be called with dt_lock held.  Always returns with dt_lock
 * released.
 */
void
user_ldt_free(struct thread *td)
{
	struct mdproc *mdp;
	struct proc_ldt *pldt;

	mtx_assert(&dt_lock, MA_OWNED);
	mdp = &td->td_proc->p_md;
	if ((pldt = mdp->md_ldt) == NULL) {
		mtx_unlock_spin(&dt_lock);
		return;
	}

	if (td == curthread) {
		lldt(_default_ldt);
		PCPU_SET(currentldt, _default_ldt);
	}

	mdp->md_ldt = NULL;
	user_ldt_deref(pldt);
}

void
user_ldt_deref(struct proc_ldt *pldt)
{

	mtx_assert(&dt_lock, MA_OWNED);
	if (--pldt->ldt_refcnt == 0) {
		mtx_unlock_spin(&dt_lock);
		kmem_free(kernel_arena, (vm_offset_t)pldt->ldt_base,
		    pldt->ldt_len * sizeof(union descriptor));
		free(pldt, M_SUBPROC);
	} else
		mtx_unlock_spin(&dt_lock);
}

/*
 * Note for the authors of compat layers (linux, etc): copyout() in
 * the function below is not a problem since it presents data in
 * arch-specific format (i.e. i386-specific in this case), not in
 * the OS-specific one.
 */
int
i386_get_ldt(struct thread *td, struct i386_ldt_args *uap)
{
	struct proc_ldt *pldt;
	char *data;
	u_int nldt, num;
	int error;

#ifdef DEBUG
	printf("i386_get_ldt: start=%u num=%u descs=%p\n",
	    uap->start, uap->num, (void *)uap->descs);
#endif

	num = min(uap->num, MAX_LD);
	data = malloc(num * sizeof(union descriptor), M_TEMP, M_WAITOK);
	mtx_lock_spin(&dt_lock);
	pldt = td->td_proc->p_md.md_ldt;
	nldt = pldt != NULL ? pldt->ldt_len : nitems(ldt);
	if (uap->start >= nldt) {
		num = 0;
	} else {
		num = min(num, nldt - uap->start);
		bcopy(pldt != NULL ?
		    &((union descriptor *)(pldt->ldt_base))[uap->start] :
		    &ldt[uap->start], data, num * sizeof(union descriptor));
	}
	mtx_unlock_spin(&dt_lock);
	error = copyout(data, uap->descs, num * sizeof(union descriptor));
	if (error == 0)
		td->td_retval[0] = num;
	free(data, M_TEMP);
	return (error);
}
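/*
 * Illustrative userland sketch (assuming the libc wrapper
 * i386_set_ldt(3) and the LDT_AUTO_ALLOC constant from
 * <machine/sysarch.h>; the flat 4GB data segment is just an example).
 * Allocating a slot with i386_set_ldt() below via LDT_AUTO_ALLOC:
 *
 *	#include <string.h>
 *	#include <machine/segments.h>
 *	#include <machine/sysarch.h>
 *
 *	union descriptor d;
 *	int slot;
 *
 *	memset(&d, 0, sizeof(d));
 *	d.sd.sd_lolimit = 0xffff;
 *	d.sd.sd_hilimit = 0xf;
 *	d.sd.sd_type = SDT_MEMRWA;
 *	d.sd.sd_dpl = SEL_UPL;
 *	d.sd.sd_p = 1;
 *	d.sd.sd_def32 = 1;
 *	d.sd.sd_gran = 1;
 *	slot = i386_set_ldt(LDT_AUTO_ALLOC, &d, 1);
 *
 * On success the returned slot is used as the selector
 * LSEL(slot, SEL_UPL), i.e. (slot << 3) | 4 | 3: the index in bits
 * 3-15, the table-indicator bit selecting the LDT, and RPL 3.
 */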
int
i386_set_ldt(struct thread *td, struct i386_ldt_args *uap,
    union descriptor *descs)
{
	struct mdproc *mdp;
	struct proc_ldt *pldt;
	union descriptor *dp;
	u_int largest_ld, i;
	int error;

#ifdef DEBUG
	printf("i386_set_ldt: start=%u num=%u descs=%p\n",
	    uap->start, uap->num, (void *)uap->descs);
#endif
	error = 0;
	mdp = &td->td_proc->p_md;

	if (descs == NULL) {
		/* Free descriptors */
		if (uap->start == 0 && uap->num == 0) {
			/*
			 * Treat this as a special case, so userland needn't
			 * know the magic number NLDT.
			 */
			uap->start = NLDT;
			uap->num = MAX_LD - NLDT;
		}
		mtx_lock_spin(&dt_lock);
		if ((pldt = mdp->md_ldt) == NULL ||
		    uap->start >= pldt->ldt_len) {
			mtx_unlock_spin(&dt_lock);
			return (0);
		}
		largest_ld = uap->start + uap->num;
		if (largest_ld > pldt->ldt_len)
			largest_ld = pldt->ldt_len;
		for (i = uap->start; i < largest_ld; i++)
			atomic_store_rel_64(&((uint64_t *)(pldt->ldt_base))[i],
			    0);
		mtx_unlock_spin(&dt_lock);
		return (0);
	}

	if (uap->start != LDT_AUTO_ALLOC || uap->num != 1) {
		/* verify range of descriptors to modify */
		largest_ld = uap->start + uap->num;
		if (uap->start >= MAX_LD || largest_ld > MAX_LD)
			return (EINVAL);
	}

	/* Check descriptors for access violations */
	for (i = 0; i < uap->num; i++) {
		dp = &descs[i];

		switch (dp->sd.sd_type) {
		case SDT_SYSNULL:	/* system null */
			dp->sd.sd_p = 0;
			break;
		case SDT_SYS286TSS:	/* system 286 TSS available */
		case SDT_SYSLDT:	/* system local descriptor table */
		case SDT_SYS286BSY:	/* system 286 TSS busy */
		case SDT_SYSTASKGT:	/* system task gate */
		case SDT_SYS286IGT:	/* system 286 interrupt gate */
		case SDT_SYS286TGT:	/* system 286 trap gate */
		case SDT_SYSNULL2:	/* undefined by Intel */
		case SDT_SYS386TSS:	/* system 386 TSS available */
		case SDT_SYSNULL3:	/* undefined by Intel */
		case SDT_SYS386BSY:	/* system 386 TSS busy */
		case SDT_SYSNULL4:	/* undefined by Intel */
		case SDT_SYS386IGT:	/* system 386 interrupt gate */
		case SDT_SYS386TGT:	/* system 386 trap gate */
		case SDT_SYS286CGT:	/* system 286 call gate */
		case SDT_SYS386CGT:	/* system 386 call gate */
			return (EACCES);

		/* memory segment types */
		case SDT_MEMEC:		/* memory execute only conforming */
		case SDT_MEMEAC:	/* memory execute only accessed conforming */
		case SDT_MEMERC:	/* memory execute read conforming */
		case SDT_MEMERAC:	/* memory execute read accessed conforming */
			/* Must be "present" if executable and conforming. */
			if (dp->sd.sd_p == 0)
				return (EACCES);
			break;
		case SDT_MEMRO:		/* memory read only */
		case SDT_MEMROA:	/* memory read only accessed */
		case SDT_MEMRW:		/* memory read write */
		case SDT_MEMRWA:	/* memory read write accessed */
		case SDT_MEMROD:	/* memory read only expand dwn limit */
		case SDT_MEMRODA:	/* memory read only expand dwn lim accessed */
		case SDT_MEMRWD:	/* memory read write expand dwn limit */
		case SDT_MEMRWDA:	/* memory read write expand dwn lim accessed */
		case SDT_MEME:		/* memory execute only */
		case SDT_MEMEA:		/* memory execute only accessed */
		case SDT_MEMER:		/* memory execute read */
		case SDT_MEMERA:	/* memory execute read accessed */
			break;
		default:
			return (EINVAL);
		}

		/* Only user (ring-3) descriptors may be present. */
		if (dp->sd.sd_p != 0 && dp->sd.sd_dpl != SEL_UPL)
			return (EACCES);
	}

	if (uap->start == LDT_AUTO_ALLOC && uap->num == 1) {
		/* Allocate a free slot */
		mtx_lock_spin(&dt_lock);
		if ((pldt = mdp->md_ldt) == NULL) {
			if ((error = i386_ldt_grow(td, NLDT + 1))) {
				mtx_unlock_spin(&dt_lock);
				return (error);
			}
			pldt = mdp->md_ldt;
		}
again:
		/*
		 * Start scanning a bit up to leave room for NVidia and
		 * Wine, which still use the "Blat" method of allocation.
		 */
		dp = &((union descriptor *)(pldt->ldt_base))[NLDT];
		for (i = NLDT; i < pldt->ldt_len; ++i) {
			if (dp->sd.sd_type == SDT_SYSNULL)
				break;
			dp++;
		}
		if (i >= pldt->ldt_len) {
			if ((error = i386_ldt_grow(td, pldt->ldt_len + 1))) {
				mtx_unlock_spin(&dt_lock);
				return (error);
			}
			goto again;
		}
		uap->start = i;
		error = i386_set_ldt_data(td, i, 1, descs);
		mtx_unlock_spin(&dt_lock);
	} else {
		largest_ld = uap->start + uap->num;
		mtx_lock_spin(&dt_lock);
		if (!(error = i386_ldt_grow(td, largest_ld))) {
			error = i386_set_ldt_data(td, uap->start, uap->num,
			    descs);
		}
		mtx_unlock_spin(&dt_lock);
	}
	if (error == 0)
		td->td_retval[0] = uap->start;
	return (error);
}
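/*
 * Illustrative note on the delete path above: passing a NULL
 * descriptor array releases slots instead of installing them, so a
 * userland call like i386_set_ldt(0, NULL, 0) hits the special case
 * that rewrites start/num and clears every slot from NLDT upward,
 * without the caller needing to know the value of NLDT.
 */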
static int
i386_set_ldt_data(struct thread *td, int start, int num,
    union descriptor *descs)
{
	struct mdproc *mdp;
	struct proc_ldt *pldt;
	uint64_t *dst, *src;
	int i;

	mtx_assert(&dt_lock, MA_OWNED);

	mdp = &td->td_proc->p_md;
	pldt = mdp->md_ldt;
	dst = (uint64_t *)(pldt->ldt_base);
	src = (uint64_t *)descs;

	/*
	 * Atomic(9) is used only to get 64bit atomic store with
	 * cmpxchg8b when available.  There is no op without release
	 * semantic.
	 */
	for (i = 0; i < num; i++)
		atomic_store_rel_64(&dst[start + i], src[i]);
	return (0);
}

static int
i386_ldt_grow(struct thread *td, int len)
{
	struct mdproc *mdp;
	struct proc_ldt *new_ldt, *pldt;
	caddr_t old_ldt_base;
	int old_ldt_len;

	mtx_assert(&dt_lock, MA_OWNED);

	if (len > MAX_LD)
		return (ENOMEM);
	if (len < NLDT + 1)
		len = NLDT + 1;

	mdp = &td->td_proc->p_md;
	old_ldt_base = NULL_LDT_BASE;
	old_ldt_len = 0;

	/* Allocate a user ldt. */
	if ((pldt = mdp->md_ldt) == NULL || len > pldt->ldt_len) {
		new_ldt = user_ldt_alloc(mdp, len);
		if (new_ldt == NULL)
			return (ENOMEM);
		pldt = mdp->md_ldt;

		if (pldt != NULL) {
			if (new_ldt->ldt_len <= pldt->ldt_len) {
				/*
				 * We just lost the race for allocation, so
				 * free the new object and return.
				 */
				mtx_unlock_spin(&dt_lock);
				kmem_free(kernel_arena,
				    (vm_offset_t)new_ldt->ldt_base,
				    new_ldt->ldt_len * sizeof(union descriptor));
				free(new_ldt, M_SUBPROC);
				mtx_lock_spin(&dt_lock);
				return (0);
			}

			/*
			 * We have to substitute the current LDT entry for
			 * curproc with the new one since its size grew.
			 */
			old_ldt_base = pldt->ldt_base;
			old_ldt_len = pldt->ldt_len;
			pldt->ldt_sd = new_ldt->ldt_sd;
			pldt->ldt_base = new_ldt->ldt_base;
			pldt->ldt_len = new_ldt->ldt_len;
		} else
			mdp->md_ldt = pldt = new_ldt;
#ifdef SMP
		/*
		 * Signal other cpus to reload ldt.  We need to unlock dt_lock
		 * here because other CPUs will contend for it, since their
		 * curthreads won't hold the lock and will block when trying
		 * to acquire it.
		 */
		mtx_unlock_spin(&dt_lock);
		smp_rendezvous(NULL, set_user_ldt_rv, NULL,
		    td->td_proc->p_vmspace);
#else
		set_user_ldt_locked(&td->td_proc->p_md);
		mtx_unlock_spin(&dt_lock);
#endif
		if (old_ldt_base != NULL_LDT_BASE) {
			/* We migrated to the larger table; free the old one. */
			kmem_free(kernel_arena, (vm_offset_t)old_ldt_base,
			    old_ldt_len * sizeof(union descriptor));
			free(new_ldt, M_SUBPROC);
		}
		mtx_lock_spin(&dt_lock);
	}
	return (0);
}