/*	$NetBSD: sys_lwp.c,v 1.52 2010/07/07 01:30:37 chs Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Lightweight process (LWP) system calls.  See kern_lwp.c for a description
 * of LWPs.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.52 2010/07/07 01:30:37 chs Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/sleepq.h>
#include <sys/lwpctl.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#include "opt_sa.h"

#define	LWP_UNPARK_MAX	1024

static syncobj_t lwp_park_sobj = {
	SOBJ_SLEEPQ_LIFO,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

static sleeptab_t lwp_park_tab;

void
lwp_sys_init(void)
{
	sleeptab_init(&lwp_park_tab);
}

int
sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */
	struct proc *p = l->l_proc;
	struct lwp *l2;
	struct schedstate_percpu *spc;
	vaddr_t uaddr;
	ucontext_t *newuc;
	int error, lid;

#ifdef KERN_SA
	mutex_enter(p->p_lock);
	if ((p->p_sflag & (PS_SA | PS_WEXIT)) != 0 || p->p_sa != NULL) {
		mutex_exit(p->p_lock);
		return EINVAL;
	}
	mutex_exit(p->p_lock);
#endif

	newuc = kmem_alloc(sizeof(ucontext_t), KM_SLEEP);
	error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
	if (error) {
		kmem_free(newuc, sizeof(ucontext_t));
		return error;
	}

	/* XXX check against resource limits */

	uaddr = uvm_uarea_alloc();
	if (__predict_false(uaddr == 0)) {
		kmem_free(newuc, sizeof(ucontext_t));
		return ENOMEM;
	}

	error = lwp_create(l, p, uaddr, SCARG(uap, flags) & LWP_DETACHED,
	    NULL, 0, p->p_emul->e_startlwp, newuc, &l2, l->l_class);
	if (__predict_false(error)) {
		uvm_uarea_free(uaddr);
		kmem_free(newuc, sizeof(ucontext_t));
		return error;
	}

	lid = l2->l_lid;
	error = copyout(&lid, SCARG(uap, new_lwp), sizeof(lid));
	if (error) {
		lwp_exit(l2);
		kmem_free(newuc, sizeof(ucontext_t));
		return error;
	}

	/*
	 * Set the new LWP running, unless the caller has requested that
	 * it be created in suspended state.  If the process is stopping,
	 * then the LWP is created stopped.
	 */
	mutex_enter(p->p_lock);
	lwp_lock(l2);
	spc = &l2->l_cpu->ci_schedstate;
	if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0 &&
	    (l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
			KASSERT(l2->l_wchan == NULL);
			l2->l_stat = LSSTOP;
			p->p_nrlwps--;
			lwp_unlock_to(l2, spc->spc_lwplock);
		} else {
			KASSERT(lwp_locked(l2, spc->spc_mutex));
			l2->l_stat = LSRUN;
			sched_enqueue(l2, false);
			lwp_unlock(l2);
		}
	} else {
		l2->l_stat = LSSUSPENDED;
		p->p_nrlwps--;
		lwp_unlock_to(l2, spc->spc_lwplock);
	}
	mutex_exit(p->p_lock);

	return 0;
}
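
/*
 * Illustrative userland sketch (not part of this file): creating an LWP
 * through the libc wrappers for the syscall above.  The context is
 * prepared with getcontext()/_lwp_makecontext(); the stack size and the
 * error handling are assumptions for the example only.
 *
 *	#include <lwp.h>
 *	#include <ucontext.h>
 *	#include <err.h>
 *	#include <stdlib.h>
 *
 *	static void
 *	start(void *arg)
 *	{
 *		// ... do work ...
 *		_lwp_exit();
 *	}
 *
 *	static lwpid_t
 *	spawn(void)
 *	{
 *		size_t stacksize = 4 * 65536;
 *		void *stack = malloc(stacksize);
 *		ucontext_t uc;
 *		lwpid_t lid;
 *
 *		if (stack == NULL)
 *			err(1, "malloc");
 *		getcontext(&uc);
 *		_lwp_makecontext(&uc, start, NULL, NULL, stack, stacksize);
 *		if (_lwp_create(&uc, 0, &lid) != 0)
 *			err(1, "_lwp_create");
 *		return lid;
 *	}
 */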

int
sys__lwp_exit(struct lwp *l, const void *v, register_t *retval)
{

	lwp_exit(l);
	return 0;
}

int
sys__lwp_self(struct lwp *l, const void *v, register_t *retval)
{

	*retval = l->l_lid;
	return 0;
}

int
sys__lwp_getprivate(struct lwp *l, const void *v, register_t *retval)
{

	*retval = (uintptr_t)l->l_private;
	return 0;
}

int
sys__lwp_setprivate(struct lwp *l, const struct sys__lwp_setprivate_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) ptr;
	} */

	return lwp_setprivate(l, SCARG(uap, ptr));
}

int
sys__lwp_suspend(struct lwp *l, const struct sys__lwp_suspend_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	int error;

	mutex_enter(p->p_lock);

#ifdef KERN_SA
	if ((p->p_sflag & PS_SA) != 0 || p->p_sa != NULL) {
		mutex_exit(p->p_lock);
		return EINVAL;
	}
#endif

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * Check for deadlock, which is only possible when we're suspending
	 * ourself.  XXX There is a short race here, as p_nrlwps is only
	 * incremented when an LWP suspends itself on the kernel/user
	 * boundary.  It's still possible to kill -9 the process so we
	 * don't bother checking further.
	 */
	lwp_lock(t);
	if ((t == l && p->p_nrlwps == 1) ||
	    (l->l_flag & (LW_WCORE | LW_WEXIT)) != 0) {
		lwp_unlock(t);
		mutex_exit(p->p_lock);
		return EDEADLK;
	}

	/*
	 * Suspend the LWP.  XXX If it's on a different CPU, we should wait
	 * for it to be preempted, where it will put itself to sleep.
	 *
	 * Suspension of the current LWP will happen on return to userspace.
	 */
	error = lwp_suspend(l, t);
	if (error) {
		mutex_exit(p->p_lock);
		return error;
	}

	/*
	 * Wait for:
	 *  o process exiting
	 *  o target LWP suspended
	 *  o target LWP not suspended and L_WSUSPEND clear
	 *  o target LWP exited
	 */
	for (;;) {
		error = cv_wait_sig(&p->p_lwpcv, p->p_lock);
		if (error) {
			error = ERESTART;
			break;
		}
		if (lwp_find(p, SCARG(uap, target)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((l->l_flag | t->l_flag) & (LW_WCORE | LW_WEXIT)) {
			error = ERESTART;
			break;
		}
		if (t->l_stat == LSSUSPENDED ||
		    (t->l_flag & LW_WSUSPEND) == 0)
			break;
	}
	mutex_exit(p->p_lock);

	return error;
}
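
/*
 * Illustrative userland sketch (not part of this file): suspending and
 * resuming another LWP in the same process.  The lid is assumed to come
 * from a prior _lwp_create().  Note the EDEADLK check above: the last
 * running LWP in a process cannot suspend itself.
 *
 *	#include <lwp.h>
 *	#include <err.h>
 *
 *	if (_lwp_suspend(lid) != 0)	// returns once the target is suspended
 *		err(1, "_lwp_suspend");
 *	// ... inspect or modify shared state while the target is stopped ...
 *	if (_lwp_continue(lid) != 0)
 *		err(1, "_lwp_continue");
 */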

int
sys__lwp_continue(struct lwp *l, const struct sys__lwp_continue_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	int error;
	struct proc *p = l->l_proc;
	struct lwp *t;

	error = 0;

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	lwp_continue(t);
	mutex_exit(p->p_lock);

	return error;
}

int
sys__lwp_wakeup(struct lwp *l, const struct sys__lwp_wakeup_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct lwp *t;
	struct proc *p;
	int error;

	p = l->l_proc;
	mutex_enter(p->p_lock);

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	t->l_flag |= (LW_CANCELLED | LW_UNPARKED);

	if (t->l_stat != LSSLEEP) {
		lwp_unlock(t);
		error = ENODEV;
	} else if ((t->l_flag & LW_SINTR) == 0) {
		lwp_unlock(t);
		error = EBUSY;
	} else {
		/* Wake it up.  lwp_unsleep() will release the LWP lock. */
		lwp_unsleep(t, true);
		error = 0;
	}

	mutex_exit(p->p_lock);

	return error;
}

int
sys__lwp_wait(struct lwp *l, const struct sys__lwp_wait_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */
	struct proc *p = l->l_proc;
	int error;
	lwpid_t dep;

	mutex_enter(p->p_lock);
	error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
	mutex_exit(p->p_lock);

	if (error)
		return error;

	if (SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed), sizeof(dep));
		if (error)
			return error;
	}

	return 0;
}
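
/*
 * Illustrative userland sketch (not part of this file): joining LWPs.
 * A first argument of 0 waits for any undetached LWP in the process to
 * exit; the id of the LWP that departed is returned through the second
 * argument.
 *
 *	#include <lwp.h>
 *	#include <err.h>
 *
 *	lwpid_t departed;
 *
 *	if (_lwp_wait(0, &departed) != 0)
 *		err(1, "_lwp_wait");
 */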

int
sys__lwp_kill(struct lwp *l, const struct sys__lwp_kill_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(int) signo;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	ksiginfo_t ksi;
	int signo = SCARG(uap, signo);
	int error = 0;

	if ((u_int)signo >= NSIG)
		return EINVAL;

	KSI_INIT(&ksi);
	ksi.ksi_signo = signo;
	ksi.ksi_code = SI_LWP;
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
	ksi.ksi_lid = SCARG(uap, target);

	mutex_enter(proc_lock);
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, ksi.ksi_lid)) == NULL)
		error = ESRCH;
	else if (signo != 0)
		kpsignal2(p, &ksi);
	mutex_exit(p->p_lock);
	mutex_exit(proc_lock);

	return error;
}

int
sys__lwp_detach(struct lwp *l, const struct sys__lwp_detach_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p;
	struct lwp *t;
	lwpid_t target;
	int error;

	target = SCARG(uap, target);
	p = l->l_proc;

	mutex_enter(p->p_lock);

	if (l->l_lid == target)
		t = l;
	else {
		/*
		 * We can't use lwp_find() here because the target might
		 * be a zombie.
		 */
		LIST_FOREACH(t, &p->p_lwps, l_sibling)
			if (t->l_lid == target)
				break;
	}

	/*
	 * If the LWP is already detached, there's nothing to do.
	 * If it's a zombie, we need to clean up after it.  LSZOMB
	 * is visible with the proc mutex held.
	 *
	 * After we have detached or released the LWP, kick any
	 * other LWPs that may be sitting in _lwp_wait(), waiting
	 * for the target LWP to exit.
	 */
	if (t != NULL && t->l_stat != LSIDL) {
		if ((t->l_prflag & LPR_DETACHED) == 0) {
			p->p_ndlwps++;
			t->l_prflag |= LPR_DETACHED;
			if (t->l_stat == LSZOMB) {
				/* Releases proc mutex. */
				lwp_free(t, false, false);
				return 0;
			}
			error = 0;

			/*
			 * Have any LWPs sleeping in lwp_wait() recheck
			 * for deadlock.
			 */
			cv_broadcast(&p->p_lwpcv);
		} else
			error = EINVAL;
	} else
		error = ESRCH;

	mutex_exit(p->p_lock);

	return error;
}
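
/*
 * Commentary (added): parked LWPs sleep in the global lwp_park_tab
 * sleep table, keyed by a wait channel derived below by XOR'ing the
 * process pointer with a caller-supplied hint (typically the address
 * of the user-level sync object).  The same (process, hint) pair
 * always hashes to the same sleep queue, which is how lwp_unpark()
 * finds a parked LWP again, while distinct processes and objects tend
 * to spread across queues.
 */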
static inline wchan_t
lwp_park_wchan(struct proc *p, const void *hint)
{

	return (wchan_t)((uintptr_t)p ^ (uintptr_t)hint);
}

int
lwp_unpark(lwpid_t target, const void *hint)
{
	sleepq_t *sq;
	wchan_t wchan;
	kmutex_t *mp;
	proc_t *p;
	lwp_t *t;

	/*
	 * Easy case: search for the LWP on the sleep queue.  If
	 * it's parked, remove it from the queue and set running.
	 */
	p = curproc;
	wchan = lwp_park_wchan(p, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	TAILQ_FOREACH(t, sq, l_sleepchain)
		if (t->l_proc == p && t->l_lid == target)
			break;

	if (__predict_true(t != NULL)) {
		sleepq_remove(sq, t);
		mutex_spin_exit(mp);
		return 0;
	}

	/*
	 * The LWP hasn't parked yet.  Take the hit and mark the
	 * operation as pending.
	 */
	mutex_spin_exit(mp);

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * It may not have parked yet, we may have raced, or it
	 * is parked on a different user sync object.
	 */
	lwp_lock(t);
	if (t->l_syncobj == &lwp_park_sobj) {
		/* Releases the LWP lock. */
		lwp_unsleep(t, true);
	} else {
		/*
		 * Set the operation pending.  The next call to _lwp_park
		 * will return early.
		 */
		t->l_flag |= LW_UNPARKED;
		lwp_unlock(t);
	}

	mutex_exit(p->p_lock);
	return 0;
}

int
lwp_park(struct timespec *ts, const void *hint)
{
	sleepq_t *sq;
	kmutex_t *mp;
	wchan_t wchan;
	int timo, error;
	lwp_t *l;

	/* Fix up the given timeout value. */
	if (ts != NULL) {
		error = abstimeout2timo(ts, &timo);
		if (error) {
			return error;
		}
		KASSERT(timo != 0);
	} else {
		timo = 0;
	}

	/* Find and lock the sleep queue. */
	l = curlwp;
	wchan = lwp_park_wchan(l->l_proc, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	/*
	 * Before going the full route and blocking, check to see if an
	 * unpark op is pending.
	 */
	lwp_lock(l);
	if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
		l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
		lwp_unlock(l);
		mutex_spin_exit(mp);
		return EALREADY;
	}
	lwp_unlock_to(l, mp);
	l->l_biglocks = 0;
	sleepq_enqueue(sq, wchan, "parked", &lwp_park_sobj);
	error = sleepq_block(timo, true);
	switch (error) {
	case EWOULDBLOCK:
		error = ETIMEDOUT;
		break;
	case ERESTART:
		error = EINTR;
		break;
	default:
		/* nothing */
		break;
	}
	return error;
}
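
/*
 * Commentary (added): the park/unpark handshake cannot lose a wakeup.
 * Either the target is already asleep on the park sleep queue and
 * lwp_unpark() dequeues it directly, or the unpark ran first and left
 * LW_UNPARKED set, in which case the target's next lwp_park() consumes
 * the flag and returns EALREADY without sleeping.  Both sides test or
 * set l_flag with the LWP locked, so the two paths cannot interleave
 * in a way that drops the wakeup.
 */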

/*
 * 'park' an LWP waiting on a user-level synchronisation object.  The LWP
 * will remain parked until another LWP in the same process calls in and
 * requests that it be unparked.
 */
int
sys____lwp_park50(struct lwp *l, const struct sys____lwp_park50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timespec *) ts;
		syscallarg(lwpid_t) unpark;
		syscallarg(const void *) hint;
		syscallarg(const void *) unparkhint;
	} */
	struct timespec ts, *tsp;
	int error;

	if (SCARG(uap, ts) == NULL)
		tsp = NULL;
	else {
		error = copyin(SCARG(uap, ts), &ts, sizeof(ts));
		if (error != 0)
			return error;
		tsp = &ts;
	}

	if (SCARG(uap, unpark) != 0) {
		error = lwp_unpark(SCARG(uap, unpark), SCARG(uap, unparkhint));
		if (error != 0)
			return error;
	}

	return lwp_park(tsp, SCARG(uap, hint));
}

int
sys__lwp_unpark(struct lwp *l, const struct sys__lwp_unpark_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(const void *) hint;
	} */

	return lwp_unpark(SCARG(uap, target), SCARG(uap, hint));
}

int
sys__lwp_unpark_all(struct lwp *l, const struct sys__lwp_unpark_all_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const lwpid_t *) targets;
		syscallarg(size_t) ntargets;
		syscallarg(const void *) hint;
	} */
	struct proc *p;
	struct lwp *t;
	sleepq_t *sq;
	wchan_t wchan;
	lwpid_t targets[32], *tp, *tpp, *tmax, target;
	int error;
	kmutex_t *mp;
	u_int ntargets;
	size_t sz;

	p = l->l_proc;
	ntargets = SCARG(uap, ntargets);

	if (SCARG(uap, targets) == NULL) {
		/*
		 * Let the caller know how much we are willing to do, and
		 * let it unpark the LWPs in blocks.
		 */
		*retval = LWP_UNPARK_MAX;
		return 0;
	}
	if (ntargets > LWP_UNPARK_MAX || ntargets == 0)
		return EINVAL;

	/*
	 * Copy in the target array.  If it's a small number of LWPs, then
	 * place the numbers on the stack.
	 */
	sz = sizeof(target) * ntargets;
	if (sz <= sizeof(targets))
		tp = targets;
	else {
		tp = kmem_alloc(sz, KM_SLEEP);
		if (tp == NULL)
			return ENOMEM;
	}
	error = copyin(SCARG(uap, targets), tp, sz);
	if (error != 0) {
		if (tp != targets) {
			kmem_free(tp, sz);
		}
		return error;
	}

	wchan = lwp_park_wchan(p, SCARG(uap, hint));
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	for (tmax = tp + ntargets, tpp = tp; tpp < tmax; tpp++) {
		target = *tpp;

		/*
		 * Easy case: search for the LWP on the sleep queue.  If
		 * it's parked, remove it from the queue and set running.
		 */
		TAILQ_FOREACH(t, sq, l_sleepchain)
			if (t->l_proc == p && t->l_lid == target)
				break;

		if (t != NULL) {
			sleepq_remove(sq, t);
			continue;
		}

		/*
		 * The LWP hasn't parked yet.  Take the hit and
		 * mark the operation as pending.
		 */
		mutex_spin_exit(mp);
		mutex_enter(p->p_lock);
		if ((t = lwp_find(p, target)) == NULL) {
			mutex_exit(p->p_lock);
			mutex_spin_enter(mp);
			continue;
		}
		lwp_lock(t);

		/*
		 * It may not have parked yet, we may have raced, or
		 * it is parked on a different user sync object.
		 */
		if (t->l_syncobj == &lwp_park_sobj) {
			/* Releases the LWP lock. */
			lwp_unsleep(t, true);
		} else {
			/*
			 * Set the operation pending.  The next call to
			 * _lwp_park will return early.
			 */
			t->l_flag |= LW_UNPARKED;
			lwp_unlock(t);
		}

		mutex_exit(p->p_lock);
		mutex_spin_enter(mp);
	}

	mutex_spin_exit(mp);
	if (tp != targets)
		kmem_free(tp, sz);

	return 0;
}
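
/*
 * Illustrative userland sketch (not part of this file): a one-shot
 * event built on the park/unpark syscalls above, in the style of
 * libpthread's use of them.  The 4-argument _lwp_park() wrapper shown
 * is assumed to match the ___lwp_park50 syscall in this revision
 * (later releases changed the signature); the "done"/"waiter"
 * protocol is an assumption for the example only.
 *
 *	#include <lwp.h>
 *	#include <sys/atomic.h>
 *
 *	static volatile unsigned int done;
 *	static volatile lwpid_t waiter;
 *
 *	static void
 *	wait_for_event(void)
 *	{
 *		waiter = _lwp_self();
 *		membar_sync();
 *		while (!done) {
 *			// EALREADY/EINTR just mean "recheck the predicate":
 *			// a pending unpark makes the next park return early.
 *			(void)_lwp_park(NULL, 0, (const void *)&done, NULL);
 *		}
 *	}
 *
 *	static void
 *	post_event(void)
 *	{
 *		done = 1;
 *		membar_sync();
 *		if (waiter != 0)
 *			(void)_lwp_unpark(waiter, (const void *)&done);
 *	}
 */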

int
sys__lwp_setname(struct lwp *l, const struct sys__lwp_setname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(const char *) name;
	} */
	char *name, *oname;
	lwpid_t target;
	proc_t *p;
	lwp_t *t;
	int error;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
	if (name == NULL)
		return ENOMEM;
	error = copyinstr(SCARG(uap, name), name, MAXCOMLEN, NULL);
	switch (error) {
	case ENAMETOOLONG:
	case 0:
		name[MAXCOMLEN - 1] = '\0';
		break;
	default:
		kmem_free(name, MAXCOMLEN);
		return error;
	}

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		kmem_free(name, MAXCOMLEN);
		return ESRCH;
	}
	lwp_lock(t);
	oname = t->l_name;
	t->l_name = name;
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	if (oname != NULL)
		kmem_free(oname, MAXCOMLEN);

	return 0;
}

int
sys__lwp_getname(struct lwp *l, const struct sys__lwp_getname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(char *) name;
		syscallarg(size_t) len;
	} */
	char name[MAXCOMLEN];
	lwpid_t target;
	proc_t *p;
	lwp_t *t;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}
	lwp_lock(t);
	if (t->l_name == NULL)
		name[0] = '\0';
	else
		strcpy(name, t->l_name);
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	return copyoutstr(name, SCARG(uap, name), SCARG(uap, len), NULL);
}

int
sys__lwp_ctl(struct lwp *l, const struct sys__lwp_ctl_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) features;
		syscallarg(struct lwpctl **) address;
	} */
	int error, features;
	vaddr_t vaddr;

	features = SCARG(uap, features);
	features &= ~(LWPCTL_FEATURE_CURCPU | LWPCTL_FEATURE_PCTR);
	if (features != 0)
		return ENODEV;
	if ((error = lwp_ctl_alloc(&vaddr)) != 0)
		return error;
	return copyout(&vaddr, SCARG(uap, address), sizeof(void *));
}
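
/*
 * Illustrative userland sketch (not part of this file): mapping the
 * per-LWP communication area with _lwp_ctl() and reading the CPU the
 * calling LWP last ran on.  LWPCTL_FEATURE_CURCPU and lc_curcpu come
 * from <sys/lwpctl.h>.
 *
 *	#include <lwp.h>
 *	#include <sys/lwpctl.h>
 *	#include <err.h>
 *	#include <stdio.h>
 *
 *	struct lwpctl *lc;
 *
 *	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &lc) != 0)
 *		err(1, "_lwp_ctl");
 *	printf("lwp %d last ran on cpu %d\n", _lwp_self(), lc->lc_curcpu);
 */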