xref: /dragonfly/sys/kern/kern_resource.c (revision 783d47c4)
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_resource.c,v 1.55.2.5 2001/11/03 01:41:08 ps Exp $
 */

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kern_syscall.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/time.h>
#include <sys/lockf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

static int donice (struct proc *chgp, int n);
static int doionice (struct proc *chgp, int n);

static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct spinlock uihash_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static struct uidinfo	*uicreate (uid_t uid);
static struct uidinfo	*uilookup (uid_t uid);

/*
 * Resource controls and accounting.
 */

struct getpriority_info {
	int low;
	int who;
};

static int getpriority_callback(struct proc *p, void *data);

/*
 * MPALMOSTSAFE
 */
int
sys_getpriority(struct getpriority_args *uap)
{
	struct getpriority_info info;
	struct proc *curp = curproc;
	struct proc *p;
	int low = PRIO_MAX + 1;
	int error;

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			p = curp;
			PHOLD(p);
		} else {
			p = pfind(uap->who);
		}
		if (p) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
				low = p->p_nice;
			}
			PRELE(p);
		}
		break;

	case PRIO_PGRP:
	{
		struct pgrp *pg;

		if (uap->who == 0) {
			pg = curp->p_pgrp;
			pgref(pg);
		} else if ((pg = pgfind(uap->who)) == NULL) {
			break;
		} /* else ref held from pgfind */

		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred) &&
			    p->p_nice < low) {
				low = p->p_nice;
			}
		}
		pgrel(pg);
		break;
	}
	case PRIO_USER:
		if (uap->who == 0)
			uap->who = curp->p_ucred->cr_uid;
		info.low = low;
		info.who = uap->who;
		allproc_scan(getpriority_callback, &info);
		low = info.low;
		break;

	default:
		error = EINVAL;
		goto done;
	}
	if (low == PRIO_MAX + 1) {
		error = ESRCH;
		goto done;
	}
	uap->sysmsg_result = low;
	error = 0;
done:
	return (error);
}
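
/*
 * Userland sketch (illustrative, not compiled with this file): since a
 * nice value may legitimately be -1, getpriority(2) callers must use
 * errno to distinguish an error from a valid result.
 */
#if 0
#include <sys/resource.h>
#include <errno.h>
#include <err.h>

static int
example_query_nice(void)
{
	int prio;

	errno = 0;
	prio = getpriority(PRIO_PROCESS, 0);
	if (prio == -1 && errno != 0)
		err(1, "getpriority");
	return (prio);
}
#endif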

/*
 * Figure out the current lowest nice priority for processes owned
 * by the specified user.
 */
static
int
getpriority_callback(struct proc *p, void *data)
{
	struct getpriority_info *info = data;

	if (PRISON_CHECK(curproc->p_ucred, p->p_ucred) &&
	    p->p_ucred->cr_uid == info->who &&
	    p->p_nice < info->low) {
		info->low = p->p_nice;
	}
	return(0);
}

struct setpriority_info {
	int prio;
	int who;
	int error;
	int found;
};

static int setpriority_callback(struct proc *p, void *data);

/*
 * MPALMOSTSAFE
 */
int
sys_setpriority(struct setpriority_args *uap)
{
	struct setpriority_info info;
	struct proc *curp = curproc;
	struct proc *p;
	int found = 0, error = 0;

	lwkt_gettoken(&proc_token);

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			p = curp;
			PHOLD(p);
		} else {
			p = pfind(uap->who);
		}
		if (p) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
				error = donice(p, uap->prio);
				found++;
			}
			PRELE(p);
		}
		break;

	case PRIO_PGRP:
	{
		struct pgrp *pg;

		if (uap->who == 0) {
			pg = curp->p_pgrp;
			pgref(pg);
		} else if ((pg = pgfind(uap->who)) == NULL) {
			break;
		} /* else ref held from pgfind */

		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
				error = donice(p, uap->prio);
				found++;
			}
		}
		pgrel(pg);
		break;
	}
	case PRIO_USER:
		if (uap->who == 0)
			uap->who = curp->p_ucred->cr_uid;
		info.prio = uap->prio;
		info.who = uap->who;
		info.error = 0;
		info.found = 0;
		allproc_scan(setpriority_callback, &info);
		error = info.error;
		found = info.found;
		break;

	default:
		error = EINVAL;
		found = 1;
		break;
	}

	lwkt_reltoken(&proc_token);

	if (found == 0)
		error = ESRCH;
	return (error);
}

static
int
setpriority_callback(struct proc *p, void *data)
{
	struct setpriority_info *info = data;
	int error;

	if (p->p_ucred->cr_uid == info->who &&
	    PRISON_CHECK(curproc->p_ucred, p->p_ucred)) {
		error = donice(p, info->prio);
		if (error)
			info->error = error;
		++info->found;
	}
	return(0);
}

static int
donice(struct proc *chgp, int n)
{
	struct proc *curp = curproc;
	struct ucred *cr = curp->p_ucred;
	struct lwp *lp;

	if (cr->cr_uid && cr->cr_ruid &&
	    cr->cr_uid != chgp->p_ucred->cr_uid &&
	    cr->cr_ruid != chgp->p_ucred->cr_uid)
		return (EPERM);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < chgp->p_nice && priv_check_cred(cr, PRIV_SCHED_SETPRIORITY, 0))
		return (EACCES);
	chgp->p_nice = n;
	FOREACH_LWP_IN_PROC(lp, chgp) {
		LWPHOLD(lp);
		chgp->p_usched->resetpriority(lp);
		LWPRELE(lp);
	}
	return (0);
}
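
/*
 * Userland sketch of the policy donice() enforces for setpriority(2)
 * (illustrative only): any user may raise their own nice value, but
 * lowering it requires PRIV_SCHED_SETPRIORITY and otherwise fails
 * with EACCES.
 */
#if 0
#include <sys/resource.h>

static void
example_renice_self(void)
{
	(void)setpriority(PRIO_PROCESS, 0, 10);	/* ok for any user */
	(void)setpriority(PRIO_PROCESS, 0, -5);	/* EACCES if unprivileged */
}
#endif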


struct ioprio_get_info {
	int high;
	int who;
};

static int ioprio_get_callback(struct proc *p, void *data);

/*
 * MPALMOSTSAFE
 */
int
sys_ioprio_get(struct ioprio_get_args *uap)
{
	struct ioprio_get_info info;
	struct proc *curp = curproc;
	struct proc *p;
	int high = IOPRIO_MIN-2;
	int error;

	lwkt_gettoken(&proc_token);

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			p = curp;
			PHOLD(p);
		} else {
			p = pfind(uap->who);
		}
		if (p) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred))
				high = p->p_ionice;
			PRELE(p);
		}
		break;

	case PRIO_PGRP:
	{
		struct pgrp *pg;

		if (uap->who == 0) {
			pg = curp->p_pgrp;
			pgref(pg);
		} else if ((pg = pgfind(uap->who)) == NULL) {
			break;
		} /* else ref held from pgfind */

		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred) &&
			    p->p_ionice > high)
				high = p->p_ionice;
		}
		pgrel(pg);
		break;
	}
	case PRIO_USER:
		if (uap->who == 0)
			uap->who = curp->p_ucred->cr_uid;
		info.high = high;
		info.who = uap->who;
		allproc_scan(ioprio_get_callback, &info);
		high = info.high;
		break;

	default:
		error = EINVAL;
		goto done;
	}
	if (high == IOPRIO_MIN-2) {
		error = ESRCH;
		goto done;
	}
	uap->sysmsg_result = high;
	error = 0;
done:
	lwkt_reltoken(&proc_token);

	return (error);
}

/*
 * Figure out the current highest I/O priority for processes owned
 * by the specified user.
 */
static
int
ioprio_get_callback(struct proc *p, void *data)
{
	struct ioprio_get_info *info = data;

	if (PRISON_CHECK(curproc->p_ucred, p->p_ucred) &&
	    p->p_ucred->cr_uid == info->who &&
	    p->p_ionice > info->high) {
		info->high = p->p_ionice;
	}
	return(0);
}


struct ioprio_set_info {
	int prio;
	int who;
	int error;
	int found;
};

static int ioprio_set_callback(struct proc *p, void *data);

/*
 * MPALMOSTSAFE
 */
int
sys_ioprio_set(struct ioprio_set_args *uap)
{
	struct ioprio_set_info info;
	struct proc *curp = curproc;
	struct proc *p;
	int found = 0, error = 0;

	lwkt_gettoken(&proc_token);

	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			p = curp;
			PHOLD(p);
		} else {
			p = pfind(uap->who);
		}
		if (p) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
				error = doionice(p, uap->prio);
				found++;
			}
			PRELE(p);
		}
		break;

	case PRIO_PGRP:
	{
		struct pgrp *pg;

		if (uap->who == 0) {
			pg = curp->p_pgrp;
			pgref(pg);
		} else if ((pg = pgfind(uap->who)) == NULL) {
			break;
		} /* else ref held from pgfind */

		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
				error = doionice(p, uap->prio);
				found++;
			}
		}
		pgrel(pg);
		break;
	}
	case PRIO_USER:
		if (uap->who == 0)
			uap->who = curp->p_ucred->cr_uid;
		info.prio = uap->prio;
		info.who = uap->who;
		info.error = 0;
		info.found = 0;
		allproc_scan(ioprio_set_callback, &info);
		error = info.error;
		found = info.found;
		break;

	default:
		error = EINVAL;
		found = 1;
		break;
	}

	lwkt_reltoken(&proc_token);

	if (found == 0)
		error = ESRCH;
	return (error);
}

static
int
ioprio_set_callback(struct proc *p, void *data)
{
	struct ioprio_set_info *info = data;
	int error;

	if (p->p_ucred->cr_uid == info->who &&
	    PRISON_CHECK(curproc->p_ucred, p->p_ucred)) {
		error = doionice(p, info->prio);
		if (error)
			info->error = error;
		++info->found;
	}
	return(0);
}

static int
doionice(struct proc *chgp, int n)
{
	struct proc *curp = curproc;
	struct ucred *cr = curp->p_ucred;

	if (cr->cr_uid && cr->cr_ruid &&
	    cr->cr_uid != chgp->p_ucred->cr_uid &&
	    cr->cr_ruid != chgp->p_ucred->cr_uid)
		return (EPERM);
	if (n > IOPRIO_MAX)
		n = IOPRIO_MAX;
	if (n < IOPRIO_MIN)
		n = IOPRIO_MIN;
	if (n < chgp->p_ionice && priv_check_cred(cr, PRIV_SCHED_SETPRIORITY, 0))
		return (EACCES);
	chgp->p_ionice = n;

	return (0);
}

/*
 * MPALMOSTSAFE
 */
int
sys_lwp_rtprio(struct lwp_rtprio_args *uap)
{
	struct proc *p;
	struct lwp *lp;
	struct rtprio rtp;
	struct ucred *cr = curthread->td_ucred;
	int error;

	error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	if (error)
		return error;
	if (uap->pid < 0)
		return EINVAL;

	lwkt_gettoken(&proc_token);

	if (uap->pid == 0) {
		p = curproc;
		PHOLD(p);
	} else {
		p = pfind(uap->pid);
	}

	if (p == NULL) {
		error = ESRCH;
		goto done;
	}

	if (uap->tid < -1) {
		error = EINVAL;
		goto done;
	}
	if (uap->tid == -1) {
		/*
		 * sadly, tid can be 0 so we can't use 0 here
		 * like sys_rtprio()
		 */
		lp = curthread->td_lwp;
	} else {
		lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, uap->tid);
		if (lp == NULL) {
			error = ESRCH;
			goto done;
		}
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		error = copyout(&lp->lwp_rtprio, uap->rtp,
				sizeof(struct rtprio));
		break;
	case RTP_SET:
		if (cr->cr_uid && cr->cr_ruid &&
		    cr->cr_uid != p->p_ucred->cr_uid &&
		    cr->cr_ruid != p->p_ucred->cr_uid) {
			error = EPERM;
			break;
		}
		/* disallow setting rtprio in most cases if not superuser */
		if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
			/* can't set someone else's */
			if (uap->pid) { /* XXX */
				error = EPERM;
				break;
			}
			/* can't set realtime priority */
/*
 * Realtime priority has to be restricted for reasons which should be
 * obvious. However, for idle priority, there is a potential for
 * system deadlock if an idleprio process gains a lock on a resource
 * that other processes need (and the idleprio process can't run
 * due to a CPU-bound normal process). Fix me! XXX
 */
			if (RTP_PRIO_IS_REALTIME(rtp.type)) {
				error = EPERM;
				break;
			}
		}
		switch (rtp.type) {
#ifdef RTP_PRIO_FIFO
		case RTP_PRIO_FIFO:
#endif
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_NORMAL:
		case RTP_PRIO_IDLE:
			if (rtp.prio > RTP_PRIO_MAX) {
				error = EINVAL;
			} else {
				lp->lwp_rtprio = rtp;
				error = 0;
			}
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}

done:
	if (p)
		PRELE(p);
	lwkt_reltoken(&proc_token);

	return (error);
}
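
/*
 * Usage sketch, assuming a userland wrapper lwp_rtprio(function, pid,
 * tid, rtp) that matches the argument order above (illustrative only):
 * tid -1 addresses the calling thread, since 0 can be a real tid.
 */
#if 0
#include <sys/rtprio.h>

static int
example_lwp_rtprio_self(void)
{
	struct rtprio rtp;

	rtp.type = RTP_PRIO_NORMAL;
	rtp.prio = 0;
	return (lwp_rtprio(RTP_SET, 0, -1, &rtp));
}
#endif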

/*
 * Set realtime priority
 *
 * MPALMOSTSAFE
 */
int
sys_rtprio(struct rtprio_args *uap)
{
	struct proc *p;
	struct lwp *lp;
	struct ucred *cr = curthread->td_ucred;
	struct rtprio rtp;
	int error;

	error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	if (error)
		return (error);

	lwkt_gettoken(&proc_token);

	if (uap->pid == 0) {
		p = curproc;
		PHOLD(p);
	} else {
		p = pfind(uap->pid);
	}

	if (p == NULL) {
		error = ESRCH;
		goto done;
	}

	/* XXX lwp */
	lp = FIRST_LWP_IN_PROC(p);
	switch (uap->function) {
	case RTP_LOOKUP:
		error = copyout(&lp->lwp_rtprio, uap->rtp,
				sizeof(struct rtprio));
		break;
	case RTP_SET:
		if (cr->cr_uid && cr->cr_ruid &&
		    cr->cr_uid != p->p_ucred->cr_uid &&
		    cr->cr_ruid != p->p_ucred->cr_uid) {
			error = EPERM;
			break;
		}
		/* disallow setting rtprio in most cases if not superuser */
		if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
			/* can't set someone else's */
			if (uap->pid) {
				error = EPERM;
				break;
			}
			/* can't set realtime priority */
/*
 * Realtime priority has to be restricted for reasons which should be
 * obvious. However, for idle priority, there is a potential for
 * system deadlock if an idleprio process gains a lock on a resource
 * that other processes need (and the idleprio process can't run
 * due to a CPU-bound normal process). Fix me! XXX
 */
			if (RTP_PRIO_IS_REALTIME(rtp.type)) {
				error = EPERM;
				break;
			}
		}
		switch (rtp.type) {
#ifdef RTP_PRIO_FIFO
		case RTP_PRIO_FIFO:
#endif
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_NORMAL:
		case RTP_PRIO_IDLE:
			if (rtp.prio > RTP_PRIO_MAX) {
				error = EINVAL;
				break;
			}
			lp->lwp_rtprio = rtp;
			error = 0;
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
done:
	if (p)
		PRELE(p);
	lwkt_reltoken(&proc_token);

	return (error);
}
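
/*
 * Userland sketch for rtprio(2) (illustrative only): an unprivileged
 * process may set idle priority on itself, subject to the deadlock
 * caveat above, but attempting a realtime type returns EPERM.
 */
#if 0
#include <sys/rtprio.h>

static int
example_go_idle(void)
{
	struct rtprio rtp;

	rtp.type = RTP_PRIO_IDLE;
	rtp.prio = RTP_PRIO_MAX;
	return (rtprio(RTP_SET, 0, &rtp));
}
#endif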

/*
 * MPSAFE
 */
int
sys_setrlimit(struct __setrlimit_args *uap)
{
	struct rlimit alim;
	int error;

	error = copyin(uap->rlp, &alim, sizeof(alim));
	if (error)
		return (error);

	error = kern_setrlimit(uap->which, &alim);

	return (error);
}

/*
 * MPSAFE
 */
int
sys_getrlimit(struct __getrlimit_args *uap)
{
	struct rlimit lim;
	int error;

	error = kern_getrlimit(uap->which, &lim);

	if (error == 0)
		error = copyout(&lim, uap->rlp, sizeof(*uap->rlp));
	return error;
}
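
/*
 * Userland sketch (illustrative only): the wrappers above just round-trip
 * a struct rlimit through copyin/copyout around kern_{set,get}rlimit().
 */
#if 0
#include <sys/resource.h>

static void
example_raise_nofile(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {
		rl.rlim_cur = rl.rlim_max;	/* raise soft to hard limit */
		(void)setrlimit(RLIMIT_NOFILE, &rl);
	}
}
#endif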

/*
 * Transform the running time and tick information in lwp lp's thread
 * into user and system time usage.
 *
 * Since we are limited to statclock tick granularity this is a statistical
 * calculation which will be correct over the long haul, but should not be
 * expected to measure fine grained deltas.
 *
 * It is possible to catch a lwp in the midst of being created, so
 * check whether lwp_thread is NULL or not.
 */
void
calcru(struct lwp *lp, struct timeval *up, struct timeval *sp)
{
	struct thread *td;

	/*
	 * Calculate at the statclock level.  YYY if the thread is owned by
	 * another cpu we need to forward the request to the other cpu, or
	 * have a token to interlock the information in order to avoid racing
	 * thread destruction.
	 */
	if ((td = lp->lwp_thread) != NULL) {
		crit_enter();
		up->tv_sec = td->td_uticks / 1000000;
		up->tv_usec = td->td_uticks % 1000000;
		sp->tv_sec = td->td_sticks / 1000000;
		sp->tv_usec = td->td_sticks % 1000000;
		crit_exit();
	}
}
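
/*
 * Sketch of a kernel-side caller (hypothetical helper, not part of this
 * file): snapshot one lwp's statistical user/system split.
 */
#if 0
static void
example_print_lwp_times(struct lwp *lp)
{
	struct timeval ut, st;

	calcru(lp, &ut, &st);
	kprintf("user %ld.%06lds sys %ld.%06lds\n",
	    (long)ut.tv_sec, ut.tv_usec, (long)st.tv_sec, st.tv_usec);
}
#endif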

/*
 * Aggregate resource statistics of all lwps of a process.
 *
 * proc.p_ru keeps track of all statistics directly related to a proc.  This
 * consists of RSS usage and nswap information and aggregate numbers for all
 * former lwps of this proc.
 *
 * proc.p_cru is the sum of all stats of reaped children.
 *
 * lwp.lwp_ru contains the stats directly related to one specific lwp, meaning
 * packet, scheduler switch or page fault counts, etc.  This information gets
 * added to lwp.lwp_proc.p_ru when the lwp exits.
 */
void
calcru_proc(struct proc *p, struct rusage *ru)
{
	struct timeval upt, spt;
	long *rip1, *rip2;
	struct lwp *lp;

	*ru = p->p_ru;

	FOREACH_LWP_IN_PROC(lp, p) {
		calcru(lp, &upt, &spt);
		timevaladd(&ru->ru_utime, &upt);
		timevaladd(&ru->ru_stime, &spt);
		for (rip1 = &ru->ru_first, rip2 = &lp->lwp_ru.ru_first;
		     rip1 <= &ru->ru_last;
		     rip1++, rip2++)
			*rip1 += *rip2;
	}
}
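
/*
 * Sketch (simplified from the exit/wait path, hypothetical names): when
 * a child is reaped, its aggregate usage plus the usage of its own reaped
 * children is folded into the parent's p_cru with ruadd().
 */
#if 0
static void
example_reap_usage(struct proc *parent, struct proc *child)
{
	struct rusage cru;

	calcru_proc(child, &cru);	/* child's p_ru plus live lwps */
	ruadd(&cru, &child->p_cru);	/* plus its reaped children */
	ruadd(&parent->p_cru, &cru);	/* charge to the parent */
}
#endif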


/*
 * MPALMOSTSAFE
 */
int
sys_getrusage(struct getrusage_args *uap)
{
	struct rusage ru;
	struct rusage *rup;
	int error;

	lwkt_gettoken(&proc_token);

	switch (uap->who) {
	case RUSAGE_SELF:
		rup = &ru;
		calcru_proc(curproc, rup);
		error = 0;
		break;
	case RUSAGE_CHILDREN:
		rup = &curproc->p_cru;
		error = 0;
		break;
	default:
		error = EINVAL;
		break;
	}
	if (error == 0)
		error = copyout(rup, uap->rusage, sizeof(struct rusage));
	lwkt_reltoken(&proc_token);
	return (error);
}

void
ruadd(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	timevaladd(&ru->ru_utime, &ru2->ru_utime);
	timevaladd(&ru->ru_stime, &ru2->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first; ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

/*
 * Find the uidinfo structure for a uid.  This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */
void
uihashinit(void)
{
	spin_init(&uihash_lock);
	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
}
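
/*
 * Worked example: hashinit() sizes the table to a power of two and
 * stores size-1 in uihash, so with a 64-bucket table (uihash == 63)
 * UIHASH(1000) selects bucket 1000 & 63 == 40.
 */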

/*
 * NOTE: Must be called with uihash_lock held
 *
 * MPSAFE
 */
static struct uidinfo *
uilookup(uid_t uid)
{
	struct	uihashhead *uipp;
	struct	uidinfo *uip;

	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash) {
		if (uip->ui_uid == uid)
			break;
	}
	return (uip);
}

/*
 * Helper function to create a uidinfo for a uid that could not be found.
 * This function will properly deal with races.
 *
 * MPSAFE
 */
static struct uidinfo *
uicreate(uid_t uid)
{
	struct	uidinfo *uip, *tmp;

	/*
	 * Allocate space; the race against a concurrent creator is
	 * handled below.
	 */
	uip = kmalloc(sizeof(*uip), M_UIDINFO, M_WAITOK|M_ZERO);

	/*
	 * Initialize structure and enter it into the hash table
	 */
	spin_init(&uip->ui_lock);
	uip->ui_uid = uid;
	uip->ui_ref = 1;	/* we're returning a ref */
	varsymset_init(&uip->ui_varsymset, NULL);

	/*
	 * Somebody may have already created the uidinfo for this
	 * uid. If so, return that instead.
	 */
	spin_lock(&uihash_lock);
	tmp = uilookup(uid);
	if (tmp != NULL) {
		uihold(tmp);
		spin_unlock(&uihash_lock);

		spin_uninit(&uip->ui_lock);
		varsymset_clean(&uip->ui_varsymset);
		kfree(uip, M_UIDINFO);
		uip = tmp;
	} else {
		LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
		spin_unlock(&uihash_lock);
	}
	return (uip);
}

/*
 * Find the uidinfo for a uid, creating one if it does not already
 * exist.  A held reference is returned which the caller must release
 * with uidrop().
 *
 * MPSAFE
 */
struct uidinfo *
uifind(uid_t uid)
{
	struct	uidinfo *uip;

	spin_lock(&uihash_lock);
	uip = uilookup(uid);
	if (uip == NULL) {
		spin_unlock(&uihash_lock);
		uip = uicreate(uid);
	} else {
		uihold(uip);
		spin_unlock(&uihash_lock);
	}
	return (uip);
}
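
/*
 * Usage sketch (hypothetical helper; assumes the usual maxprocperuid
 * limit): uifind() returns a held reference that must be balanced with
 * uidrop(), here paired with chgproccnt() from below.
 */
#if 0
static int
example_check_proc_limit(struct ucred *cred)
{
	struct uidinfo *uip;
	int ok;

	uip = uifind(cred->cr_uid);	/* held reference */
	ok = chgproccnt(uip, 1, maxprocperuid);
	if (ok)
		chgproccnt(uip, -1, 0);	/* undo; subtraction always allowed */
	uidrop(uip);
	return (ok ? 0 : EAGAIN);
}
#endif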

/*
 * Helper function to remove a uidinfo whose reference count is
 * transitioning from 1->0.  The reference count is 1 on call.
 *
 * Zero is returned on success, otherwise non-zero and the
 * uip has not been removed.
 *
 * MPSAFE
 */
static __inline int
uifree(struct uidinfo *uip)
{
	/*
	 * If we are still the only holder after acquiring the uihash_lock
	 * we can safely unlink the uip and destroy it.  Otherwise we lost
	 * a race and must fail.
	 */
	spin_lock(&uihash_lock);
	if (uip->ui_ref != 1) {
		spin_unlock(&uihash_lock);
		return(-1);
	}
	LIST_REMOVE(uip, ui_hash);
	spin_unlock(&uihash_lock);

	/*
	 * The uip is now orphaned and we can destroy it at our
	 * leisure.
	 */
	if (uip->ui_sbsize != 0)
		kprintf("freeing uidinfo: uid = %d, sbsize = %jd\n",
		    uip->ui_uid, (intmax_t)uip->ui_sbsize);
	if (uip->ui_proccnt != 0)
		kprintf("freeing uidinfo: uid = %d, proccnt = %ld\n",
		    uip->ui_uid, uip->ui_proccnt);

	varsymset_clean(&uip->ui_varsymset);
	lockuninit(&uip->ui_varsymset.vx_lock);
	spin_uninit(&uip->ui_lock);
	kfree(uip, M_UIDINFO);
	return(0);
}

/*
 * MPSAFE
 */
void
uihold(struct uidinfo *uip)
{
	atomic_add_int(&uip->ui_ref, 1);
	KKASSERT(uip->ui_ref >= 0);
}

/*
 * NOTE: It is important for us to not drop the ref count to 0
 *	 because this can cause a 2->0/2->0 race with another
 *	 concurrent dropper.  Losing the race in that situation
 *	 can cause uip to become stale for one of the other
 *	 threads.
 *
 * MPSAFE
 */
void
uidrop(struct uidinfo *uip)
{
	int ref;

	KKASSERT(uip->ui_ref > 0);

	for (;;) {
		ref = uip->ui_ref;
		cpu_ccfence();
		if (ref == 1) {
			if (uifree(uip) == 0)
				break;
		} else if (atomic_cmpset_int(&uip->ui_ref, ref, ref - 1)) {
			break;
		}
		/* else retry */
	}
}

void
uireplace(struct uidinfo **puip, struct uidinfo *nuip)
{
	uidrop(*puip);
	*puip = nuip;
}

/*
 * Change the count associated with number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(struct uidinfo *uip, int diff, int max)
{
	int ret;

	spin_lock(&uip->ui_lock);
	/* don't allow them to exceed max, but allow subtraction */
	if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
		ret = 0;
	} else {
		uip->ui_proccnt += diff;
		if (uip->ui_proccnt < 0)
			kprintf("negative proccnt for uid = %d\n", uip->ui_uid);
		ret = 1;
	}
	spin_unlock(&uip->ui_lock);
	return ret;
}

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_long *hiwat, u_long to, rlim_t max)
{
	rlim_t new;

	spin_lock(&uip->ui_lock);
	new = uip->ui_sbsize + to - *hiwat;
	KKASSERT(new >= 0);

	/*
	 * When we are trying to increase the socket buffer size, scale
	 * down the hi water mark if we would exceed the user's allowed
	 * socket buffer space.
	 *
	 * We can't scale down too much or we will blow up atomic packet
	 * operations.
	 */
	if (to > *hiwat && to > MCLBYTES && new > max) {
		to = to * max / new;
		if (to < MCLBYTES)
			to = MCLBYTES;
	}
	uip->ui_sbsize = new;
	*hiwat = to;
	spin_unlock(&uip->ui_lock);
	return (1);
}
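
/*
 * Worked example for the scale-down above (illustrative numbers): with
 * ui_sbsize = 96K already charged, *hiwat = 32K and a request of
 * to = 64K against max = 100K, new = 96K + 64K - 32K = 128K > max, so
 * the request is scaled: to = 64K * 100K / 128K = 50K, still >= MCLBYTES.
 * Note ui_sbsize is charged the unscaled new (128K) while *hiwat
 * becomes 50K.
 */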
1126