1 /* $OpenBSD: kern_resource.c,v 1.80 2023/09/13 14:25:49 claudio Exp $ */ 2 /* $NetBSD: kern_resource.c,v 1.38 1996/10/23 07:19:38 matthias Exp $ */ 3 4 /*- 5 * Copyright (c) 1982, 1986, 1991, 1993 6 * The Regents of the University of California. All rights reserved. 7 * (c) UNIX System Laboratories, Inc. 8 * All or some portions of this file are derived from material licensed 9 * to the University of California by American Telephone and Telegraph 10 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 11 * the permission of UNIX System Laboratories, Inc. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. Neither the name of the University nor the names of its contributors 22 * may be used to endorse or promote products derived from this software 23 * without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 * 37 * @(#)kern_resource.c 8.5 (Berkeley) 1/21/94 38 */ 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/kernel.h> 43 #include <sys/file.h> 44 #include <sys/resourcevar.h> 45 #include <sys/pool.h> 46 #include <sys/proc.h> 47 #include <sys/ktrace.h> 48 #include <sys/sched.h> 49 #include <sys/signalvar.h> 50 51 #include <sys/mount.h> 52 #include <sys/syscallargs.h> 53 54 #include <uvm/uvm_extern.h> 55 #include <uvm/uvm.h> 56 57 /* Resource usage check interval in msec */ 58 #define RUCHECK_INTERVAL 1000 59 60 /* SIGXCPU interval in seconds of process runtime */ 61 #define SIGXCPU_INTERVAL 5 62 63 struct plimit *lim_copy(struct plimit *); 64 struct plimit *lim_write_begin(void); 65 void lim_write_commit(struct plimit *); 66 67 void tuagg_sub(struct tusage *, struct proc *, const struct timespec *); 68 69 /* 70 * Patchable maximum data and stack limits. 71 */ 72 rlim_t maxdmap = MAXDSIZ; 73 rlim_t maxsmap = MAXSSIZ; 74 75 /* 76 * Serializes resource limit updates. 77 * This lock has to be held together with ps_mtx when updating 78 * the process' ps_limit. 79 */ 80 struct rwlock rlimit_lock = RWLOCK_INITIALIZER("rlimitlk"); 81 82 /* 83 * Resource controls and accounting. 
 */

/*
 * getpriority(2): return the lowest (most favorable) nice value among
 * the processes selected by which/who.  Nice values are kept biased
 * by NZERO internally and unbiased before being returned to userland.
 */
int
sys_getpriority(struct proc *curp, void *v, register_t *retval)
{
	struct sys_getpriority_args /* {
		syscallarg(int) which;
		syscallarg(id_t) who;
	} */ *uap = v;
	struct process *pr;
	int low = NZERO + PRIO_MAX + 1;	/* sentinel: above any valid nice */

	switch (SCARG(uap, which)) {

	case PRIO_PROCESS:
		/* who == 0 means the calling process */
		if (SCARG(uap, who) == 0)
			pr = curp->p_p;
		else
			pr = prfind(SCARG(uap, who));
		if (pr == NULL)
			break;
		if (pr->ps_nice < low)
			low = pr->ps_nice;
		break;

	case PRIO_PGRP: {
		struct pgrp *pg;

		/* who == 0 means the caller's process group */
		if (SCARG(uap, who) == 0)
			pg = curp->p_p->ps_pgrp;
		else if ((pg = pgfind(SCARG(uap, who))) == NULL)
			break;
		LIST_FOREACH(pr, &pg->pg_members, ps_pglist)
			if (pr->ps_nice < low)
				low = pr->ps_nice;
		break;
	}

	case PRIO_USER:
		/* who == 0 means the caller's effective uid */
		if (SCARG(uap, who) == 0)
			SCARG(uap, who) = curp->p_ucred->cr_uid;
		LIST_FOREACH(pr, &allprocess, ps_list)
			if (pr->ps_ucred->cr_uid == SCARG(uap, who) &&
			    pr->ps_nice < low)
				low = pr->ps_nice;
		break;

	default:
		return (EINVAL);
	}
	if (low == NZERO + PRIO_MAX + 1)
		return (ESRCH);		/* nothing matched who/which */
	*retval = low - NZERO;		/* unbias for userland */
	return (0);
}

/*
 * setpriority(2): set the nice value of every process selected by
 * which/who via donice().  Returns ESRCH if no process matched;
 * otherwise the error of the last donice() attempt (so a partial
 * success over a group still reports any permission failure).
 */
int
sys_setpriority(struct proc *curp, void *v, register_t *retval)
{
	struct sys_setpriority_args /* {
		syscallarg(int) which;
		syscallarg(id_t) who;
		syscallarg(int) prio;
	} */ *uap = v;
	struct process *pr;
	int found = 0, error = 0;

	switch (SCARG(uap, which)) {

	case PRIO_PROCESS:
		if (SCARG(uap, who) == 0)
			pr = curp->p_p;
		else
			pr = prfind(SCARG(uap, who));
		if (pr == NULL)
			break;
		error = donice(curp, pr, SCARG(uap, prio));
		found = 1;
		break;

	case PRIO_PGRP: {
		struct pgrp *pg;

		if (SCARG(uap, who) == 0)
			pg = curp->p_p->ps_pgrp;
		else if ((pg = pgfind(SCARG(uap, who))) == NULL)
			break;
		LIST_FOREACH(pr, &pg->pg_members, ps_pglist) {
			error = donice(curp, pr, SCARG(uap, prio));
			found = 1;
		}
		break;
	}

	case PRIO_USER:
		if (SCARG(uap, who) == 0)
			SCARG(uap, who) = curp->p_ucred->cr_uid;
		LIST_FOREACH(pr, &allprocess, ps_list)
			if (pr->ps_ucred->cr_uid == SCARG(uap, who)) {
				error = donice(curp, pr, SCARG(uap, prio));
				found = 1;
			}
		break;

	default:
		return (EINVAL);
	}
	if (!found)
		return (ESRCH);
	return (error);
}

/*
 * Change the nice value of process chgpr to n on behalf of curp.
 * Non-root callers may only affect processes with a matching uid
 * (EPERM otherwise) and may never lower the nice value (EACCES).
 * On success the scheduling priority of every thread in chgpr is
 * recomputed under the scheduler lock.
 */
int
donice(struct proc *curp, struct process *chgpr, int n)
{
	struct ucred *ucred = curp->p_ucred;
	struct proc *p;
	int s;

	if (ucred->cr_uid != 0 && ucred->cr_ruid != 0 &&
	    ucred->cr_uid != chgpr->ps_ucred->cr_uid &&
	    ucred->cr_ruid != chgpr->ps_ucred->cr_uid)
		return (EPERM);
	/* clamp to [PRIO_MIN, PRIO_MAX], then bias by NZERO for storage */
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	n += NZERO;
	/* lowering nice (raising priority) requires superuser */
	if (n < chgpr->ps_nice && suser(curp))
		return (EACCES);
	chgpr->ps_nice = n;
	SCHED_LOCK(s);
	TAILQ_FOREACH(p, &chgpr->ps_threads, p_thr_link) {
		setpriority(p, p->p_estcpu, n);
	}
	SCHED_UNLOCK(s);
	return (0);
}

/*
 * setrlimit(2): copy in the new limit from userland and apply it
 * via dosetrlimit().
 */
int
sys_setrlimit(struct proc *p, void *v, register_t *retval)
{
	struct sys_setrlimit_args /* {
		syscallarg(int) which;
		syscallarg(const struct rlimit *) rlp;
	} */ *uap = v;
	struct rlimit alim;
	int error;

	error = copyin((caddr_t)SCARG(uap, rlp), (caddr_t)&alim,
	    sizeof (struct rlimit));
	if (error)
		return (error);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_STRUCT))
		ktrrlimit(p, &alim);
#endif
	return (dosetrlimit(p, SCARG(uap, which), &alim));
}

/*
 * Apply a new resource limit "which" to the calling process.
 * Raising the hard limit requires superuser.  Limits are clamped to
 * system-wide maxima, and RLIMIT_CPU/RLIMIT_STACK get special
 * treatment: CPU limits arm the periodic rucheck timeout, and stack
 * limit changes adjust the accessible protection of the stack region.
 * Serialized by rlimit_lock.
 */
int
dosetrlimit(struct proc *p, u_int which, struct rlimit *limp)
{
	struct rlimit *alimp;
	struct plimit *limit;
	rlim_t maxlim;
	int error;

	if (which >= RLIM_NLIMITS || limp->rlim_cur > limp->rlim_max)
		return (EINVAL);

	rw_enter_write(&rlimit_lock);

	alimp = &p->p_p->ps_limit->pl_rlimit[which];
	if (limp->rlim_max > alimp->rlim_max) {
		/* only root may raise the hard limit */
		if ((error = suser(p)) != 0) {
			rw_exit_write(&rlimit_lock);
			return (error);
		}
	}

	/* Get exclusive write access to the limit structure. */
	limit = lim_write_begin();
	alimp = &limit->pl_rlimit[which];

	/* determine the system-wide ceiling for this limit */
	switch (which) {
	case RLIMIT_DATA:
		maxlim = maxdmap;
		break;
	case RLIMIT_STACK:
		maxlim = maxsmap;
		break;
	case RLIMIT_NOFILE:
		maxlim = maxfiles;
		break;
	case RLIMIT_NPROC:
		maxlim = maxprocess;
		break;
	default:
		maxlim = RLIM_INFINITY;
		break;
	}

	if (limp->rlim_max > maxlim)
		limp->rlim_max = maxlim;
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;

	/* arm the CPU-usage check when a finite CPU limit first appears */
	if (which == RLIMIT_CPU && limp->rlim_cur != RLIM_INFINITY &&
	    alimp->rlim_cur == RLIM_INFINITY)
		timeout_add_msec(&p->p_p->ps_rucheck_to, RUCHECK_INTERVAL);

	if (which == RLIMIT_STACK) {
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If stack limit is going
		 * up make more accessible, if going down make inaccessible.
		 */
		if (limp->rlim_cur != alimp->rlim_cur) {
			vaddr_t addr;
			vsize_t size;
			vm_prot_t prot;
			struct vmspace *vm = p->p_vmspace;

			if (limp->rlim_cur > alimp->rlim_cur) {
				/* growing: expose the delta read/write */
				prot = PROT_READ | PROT_WRITE;
				size = limp->rlim_cur - alimp->rlim_cur;
#ifdef MACHINE_STACK_GROWS_UP
				addr = (vaddr_t)vm->vm_maxsaddr +
				    alimp->rlim_cur;
#else
				addr = (vaddr_t)vm->vm_minsaddr -
				    limp->rlim_cur;
#endif
			} else {
				/* shrinking: revoke access to the delta */
				prot = PROT_NONE;
				size = alimp->rlim_cur - limp->rlim_cur;
#ifdef MACHINE_STACK_GROWS_UP
				addr = (vaddr_t)vm->vm_maxsaddr +
				    limp->rlim_cur;
#else
				addr = (vaddr_t)vm->vm_minsaddr -
				    alimp->rlim_cur;
#endif
			}
			addr = trunc_page(addr);
			size = round_page(size);
			KERNEL_LOCK();
			(void) uvm_map_protect(&vm->vm_map, addr,
			    addr+size, prot, UVM_ET_STACK, FALSE, FALSE);
			KERNEL_UNLOCK();
		}
	}

	*alimp = *limp;

	lim_write_commit(limit);
	rw_exit_write(&rlimit_lock);

	return (0);
}

/*
 * getrlimit(2): copy the requested resource limit out to userland.
 * The plimit structure is sampled under a read reference so a
 * concurrent update cannot tear the rlimit pair.
 */
int
sys_getrlimit(struct proc *p, void *v, register_t *retval)
{
	struct sys_getrlimit_args /* {
		syscallarg(int) which;
		syscallarg(struct rlimit *) rlp;
	} */ *uap = v;
	struct plimit *limit;
	struct rlimit alimp;
	int error;

	if (SCARG(uap, which) < 0 || SCARG(uap, which) >= RLIM_NLIMITS)
		return (EINVAL);
	limit = lim_read_enter();
	alimp = limit->pl_rlimit[SCARG(uap, which)];
	lim_read_leave(limit);
	error = copyout(&alimp, SCARG(uap, rlp), sizeof(struct rlimit));
#ifdef KTRACE
	if (error == 0 && KTRPOINT(p, KTR_STRUCT))
		ktrrlimit(p, &alimp);
#endif
	return (error);
}

/*
 * Fold thread p's pending tick counts (and optionally a runtime delta)
 * into the given tusage accumulator.  Does not clear p's counters;
 * callers such as tuagg_locked() do that.
 */
void
tuagg_sub(struct tusage *tup, struct proc *p, const struct timespec *ts)
{
	if (ts != NULL)
		timespecadd(&tup->tu_runtime, ts, &tup->tu_runtime);
	tup->tu_uticks += p->p_uticks;
	tup->tu_sticks += p->p_sticks;
	tup->tu_iticks += p->p_iticks;
}

/*
 * Aggregate
 * a single thread's immediate time counts into the running
 * totals for the thread and process.  Caller must hold the
 * scheduler lock (see tuagg() for the locked wrapper).
 */
void
tuagg_locked(struct process *pr, struct proc *p, const struct timespec *ts)
{
	/* credit both the process-wide and per-thread accumulators */
	tuagg_sub(&pr->ps_tu, p, ts);
	tuagg_sub(&p->p_tu, p, ts);
	/* counters are now folded in; reset for the next interval */
	p->p_uticks = 0;
	p->p_sticks = 0;
	p->p_iticks = 0;
}

/*
 * Locked wrapper around tuagg_locked(): aggregate thread p's tick
 * counts into process pr under the scheduler lock.
 */
void
tuagg(struct process *pr, struct proc *p)
{
	int s;

	SCHED_LOCK(s);
	tuagg_locked(pr, p, NULL);
	SCHED_UNLOCK(s);
}

/*
 * Transform the running time and tick information in a struct tusage
 * into user, system, and interrupt time usage.
 */
void
calctsru(struct tusage *tup, struct timespec *up, struct timespec *sp,
    struct timespec *ip)
{
	u_quad_t st, ut, it;

	st = tup->tu_sticks;
	ut = tup->tu_uticks;
	it = tup->tu_iticks;

	if (st + ut + it == 0) {
		/* no ticks at all: report zero everywhere */
		timespecclear(up);
		timespecclear(sp);
		if (ip != NULL)
			timespecclear(ip);
		return;
	}

	/* convert stathz ticks to nanoseconds, then split sec/nsec */
	st = st * 1000000000 / stathz;
	sp->tv_sec = st / 1000000000;
	sp->tv_nsec = st % 1000000000;
	ut = ut * 1000000000 / stathz;
	up->tv_sec = ut / 1000000000;
	up->tv_nsec = ut % 1000000000;
	if (ip != NULL) {
		it = it * 1000000000 / stathz;
		ip->tv_sec = it / 1000000000;
		ip->tv_nsec = it % 1000000000;
	}
}

/*
 * Timeval flavor of calctsru(): same conversion, reported as
 * struct timeval.  ip may be NULL if interrupt time is not wanted.
 */
void
calcru(struct tusage *tup, struct timeval *up, struct timeval *sp,
    struct timeval *ip)
{
	struct timespec u, s, i;

	calctsru(tup, &u, &s, ip != NULL ? &i : NULL);
	TIMESPEC_TO_TIMEVAL(up, &u);
	TIMESPEC_TO_TIMEVAL(sp, &s);
	if (ip != NULL)
		TIMESPEC_TO_TIMEVAL(ip, &i);
}

/*
 * getrusage(2): gather resource usage via dogetrusage() and copy it
 * out to userland.
 */
int
sys_getrusage(struct proc *p, void *v, register_t *retval)
{
	struct sys_getrusage_args /* {
		syscallarg(int) who;
		syscallarg(struct rusage *) rusage;
	} */ *uap = v;
	struct rusage ru;
	int error;

	error = dogetrusage(p, SCARG(uap, who), &ru);
	if (error == 0) {
		error = copyout(&ru, SCARG(uap, rusage), sizeof(ru));
#ifdef KTRACE
		if (error == 0 && KTRPOINT(p, KTR_STRUCT))
			ktrrusage(p, &ru);
#endif
	}
	return (error);
}

/*
 * Fill *rup with the resource usage selected by who: the whole
 * process (RUSAGE_SELF), the calling thread (RUSAGE_THREAD), or
 * reaped children (RUSAGE_CHILDREN).  Returns EINVAL for any other
 * selector.
 */
int
dogetrusage(struct proc *p, int who, struct rusage *rup)
{
	struct process *pr = p->p_p;
	struct proc *q;

	switch (who) {

	case RUSAGE_SELF:
		/* start with the sum of dead threads, if any */
		if (pr->ps_ru != NULL)
			*rup = *pr->ps_ru;
		else
			memset(rup, 0, sizeof(*rup));

		/* add on all living threads */
		TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
			ruadd(rup, &q->p_ru);
			tuagg(pr, q);
		}

		calcru(&pr->ps_tu, &rup->ru_utime, &rup->ru_stime, NULL);
		break;

	case RUSAGE_THREAD:
		*rup = p->p_ru;
		calcru(&p->p_tu, &rup->ru_utime, &rup->ru_stime, NULL);
		break;

	case RUSAGE_CHILDREN:
		*rup = pr->ps_cru;
		break;

	default:
		return (EINVAL);
	}
	return (0);
}

/*
 * Accumulate rusage ru2 into ru: times are added, ru_maxrss takes
 * the maximum, and every other field (the ru_first..ru_last run of
 * longs) is summed member-wise.
 */
void
ruadd(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime);
	timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	/* walk the contiguous long-valued fields and sum them */
	ip = &ru->ru_first; ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

/*
 * Check if the process exceeds its cpu resource allocation.
 * If over max, kill it.
 */
void
rucheck(void *arg)
{
	struct rlimit rlim;
	struct process *pr = arg;
	time_t runtime;
	int s;

	KERNEL_ASSERT_LOCKED();

	/* sample accumulated runtime under the scheduler lock */
	SCHED_LOCK(s);
	runtime = pr->ps_tu.tu_runtime.tv_sec;
	SCHED_UNLOCK(s);

	/* snapshot the CPU limit pair atomically w.r.t. limit updates */
	mtx_enter(&pr->ps_mtx);
	rlim = pr->ps_limit->pl_rlimit[RLIMIT_CPU];
	mtx_leave(&pr->ps_mtx);

	if ((rlim_t)runtime >= rlim.rlim_cur) {
		if ((rlim_t)runtime >= rlim.rlim_max) {
			/* hard limit exceeded: kill the process */
			prsignal(pr, SIGKILL);
		} else if (runtime >= pr->ps_nextxcpu) {
			/* soft limit: warn with SIGXCPU, then back off */
			prsignal(pr, SIGXCPU);
			pr->ps_nextxcpu = runtime + SIGXCPU_INTERVAL;
		}
	}

	/* re-arm ourselves for the next check interval */
	timeout_add_msec(&pr->ps_rucheck_to, RUCHECK_INTERVAL);
}

/* backing pool for struct plimit allocations */
struct pool plimit_pool;

/*
 * Initialize the plimit pool and set up the initial (process 0)
 * resource limits: everything infinite except open files, process
 * count, resident set size, and locked memory.
 */
void
lim_startup(struct plimit *limit0)
{
	rlim_t lim;
	int i;

	pool_init(&plimit_pool, sizeof(struct plimit), 0, IPL_MPFLOOR,
	    PR_WAITOK, "plimitpl", NULL);

	for (i = 0; i < nitems(limit0->pl_rlimit); i++)
		limit0->pl_rlimit[i].rlim_cur =
		    limit0->pl_rlimit[i].rlim_max = RLIM_INFINITY;
	limit0->pl_rlimit[RLIMIT_NOFILE].rlim_cur = NOFILE;
	limit0->pl_rlimit[RLIMIT_NOFILE].rlim_max = MIN(NOFILE_MAX,
	    (maxfiles - NOFILE > NOFILE) ? maxfiles - NOFILE : NOFILE);
	limit0->pl_rlimit[RLIMIT_NPROC].rlim_cur = MAXUPRC;
	lim = ptoa(uvmexp.free);
	limit0->pl_rlimit[RLIMIT_RSS].rlim_max = lim;
	lim = ptoa(64*1024);		/* Default to very low */
	limit0->pl_rlimit[RLIMIT_MEMLOCK].rlim_max = lim;
	limit0->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = lim / 3;
	refcnt_init(&limit0->pl_refcnt);
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork,
 * and copy when a limit is changed.
 */
struct plimit *
lim_copy(struct plimit *lim)
{
	struct plimit *newlim;

	newlim = pool_get(&plimit_pool, PR_WAITOK);
	memcpy(newlim->pl_rlimit, lim->pl_rlimit,
	    sizeof(struct rlimit) * RLIM_NLIMITS);
	/* fresh copy starts with a single reference held by the caller */
	refcnt_init(&newlim->pl_refcnt);
	return (newlim);
}

/*
 * Drop a reference to a plimit structure, freeing it back to the
 * pool when the last reference goes away.
 */
void
lim_free(struct plimit *lim)
{
	if (refcnt_rele(&lim->pl_refcnt) == 0)
		return;
	pool_put(&plimit_pool, lim);
}

/*
 * Share the parent's resource limits with a newly forked child
 * (copy-on-write: an actual copy is made only when a limit is
 * later changed).  If a finite CPU limit is inherited, arm the
 * child's periodic rucheck timeout.
 */
void
lim_fork(struct process *parent, struct process *child)
{
	struct plimit *limit;

	mtx_enter(&parent->ps_mtx);
	limit = parent->ps_limit;
	refcnt_take(&limit->pl_refcnt);
	mtx_leave(&parent->ps_mtx);

	child->ps_limit = limit;

	if (limit->pl_rlimit[RLIMIT_CPU].rlim_cur != RLIM_INFINITY)
		timeout_add_msec(&child->ps_rucheck_to, RUCHECK_INTERVAL);
}

/*
 * Return an exclusive write reference to the process' resource limit structure.
 * The caller has to release the structure by calling lim_write_commit().
 *
 * This invalidates any plimit read reference held by the calling thread.
 */
struct plimit *
lim_write_begin(void)
{
	struct plimit *limit;
	struct proc *p = curproc;

	rw_assert_wrlock(&rlimit_lock);

	/* drop this thread's cached read reference, if any */
	if (p->p_limit != NULL)
		lim_free(p->p_limit);
	p->p_limit = NULL;

	/*
	 * It is safe to access ps_limit here without holding ps_mtx
	 * because rlimit_lock excludes other writers.
	 */

	limit = p->p_p->ps_limit;
	/* copy-on-write: clone if anyone else can see this structure */
	if (P_HASSIBLING(p) || refcnt_shared(&limit->pl_refcnt))
		limit = lim_copy(limit);

	return (limit);
}

/*
 * Finish exclusive write access to the plimit structure.
 * This makes the structure visible to other threads in the process.
 */
void
lim_write_commit(struct plimit *limit)
{
	struct plimit *olimit;
	struct proc *p = curproc;

	rw_assert_wrlock(&rlimit_lock);

	/* only swap if lim_write_begin() actually made a copy */
	if (limit != p->p_p->ps_limit) {
		mtx_enter(&p->p_p->ps_mtx);
		olimit = p->p_p->ps_limit;
		p->p_p->ps_limit = limit;
		mtx_leave(&p->p_p->ps_mtx);

		/* release the process' reference to the old structure */
		lim_free(olimit);
	}
}

/*
 * Begin read access to the process' resource limit structure.
 * The access has to be finished by calling lim_read_leave().
 *
 * Sections denoted by lim_read_enter() and lim_read_leave() cannot nest.
 */
struct plimit *
lim_read_enter(void)
{
	struct plimit *limit;
	struct proc *p = curproc;
	struct process *pr = p->p_p;

	/*
	 * This thread might not observe the latest value of ps_limit
	 * if another thread updated the limits very recently on another CPU.
	 * However, the anomaly should disappear quickly, especially if
	 * there is any synchronization activity between the threads (or
	 * the CPUs).
	 */

	limit = p->p_limit;
	if (limit != pr->ps_limit) {
		/* refresh the per-thread cached reference under ps_mtx */
		mtx_enter(&pr->ps_mtx);
		limit = pr->ps_limit;
		refcnt_take(&limit->pl_refcnt);
		mtx_leave(&pr->ps_mtx);
		if (p->p_limit != NULL)
			lim_free(p->p_limit);
		p->p_limit = limit;
	}
	KASSERT(limit != NULL);
	return (limit);
}

/*
 * Get the value of the resource limit in given process.
 */
rlim_t
lim_cur_proc(struct proc *p, int which)
{
	struct process *pr = p->p_p;
	rlim_t val;

	KASSERT(which >= 0 && which < RLIM_NLIMITS);

	/* ps_mtx guards ps_limit; sample the soft limit atomically */
	mtx_enter(&pr->ps_mtx);
	val = pr->ps_limit->pl_rlimit[which].rlim_cur;
	mtx_leave(&pr->ps_mtx);
	return (val);
}