1 /*-
2  * Copyright (c) 1982, 1986, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
35  * $FreeBSD: src/sys/kern/kern_resource.c,v 1.55.2.5 2001/11/03 01:41:08 ps Exp $
36  */
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/sysmsg.h>
41 #include <sys/file.h>
42 #include <sys/kern_syscall.h>
43 #include <sys/kernel.h>
44 #include <sys/resourcevar.h>
45 #include <sys/malloc.h>
46 #include <sys/proc.h>
47 #include <sys/priv.h>
48 #include <sys/time.h>
49 #include <sys/lockf.h>
50 
51 #include <vm/vm.h>
52 #include <vm/vm_param.h>
53 #include <sys/lock.h>
54 #include <vm/pmap.h>
55 #include <vm/vm_map.h>
56 
57 #include <sys/thread2.h>
58 #include <sys/spinlock2.h>
59 
60 static int donice (struct proc *chgp, int n);
61 static int doionice (struct proc *chgp, int n);
62 
63 static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
64 #define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
65 static struct spinlock uihash_lock;
66 static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
67 static u_long uihash;		/* size of hash table - 1 */
68 
69 static struct uidinfo	*uilookup (uid_t uid);
70 
71 /*
72  * Resource controls and accounting.
73  */
74 
75 struct getpriority_info {
76 	int low;
77 	int who;
78 };
79 
80 static int getpriority_callback(struct proc *p, void *data);
81 
82 /*
83  * MPALMOSTSAFE
84  */
85 int
86 sys_getpriority(struct sysmsg *sysmsg, const struct getpriority_args *uap)
87 {
88 	struct getpriority_info info;
89 	thread_t curtd = curthread;
90 	struct proc *curp = curproc;
91 	struct proc *p;
92 	struct pgrp *pg;
93 	int low = PRIO_MAX + 1;
94 	int who = uap->who;
95 	int error;
96 
97 	switch (uap->which) {
98 	case PRIO_PROCESS:
99 		if (who == 0) {
100 			low = curp->p_nice;
101 		} else {
102 			p = pfind(who);
103 			if (p) {
104 				lwkt_gettoken_shared(&p->p_token);
105 				if (PRISON_CHECK(curtd->td_ucred, p->p_ucred))
106 					low = p->p_nice;
107 				lwkt_reltoken(&p->p_token);
108 				PRELE(p);
109 			}
110 		}
111 		break;
112 	case PRIO_PGRP:
113 		if (who == 0) {
114 			lwkt_gettoken_shared(&curp->p_token);
115 			pg = curp->p_pgrp;
116 			pgref(pg);
117 			lwkt_reltoken(&curp->p_token);
118 		} else if ((pg = pgfind(who)) == NULL) {
119 			break;
120 		} /* else ref held from pgfind */
121 
122 		lwkt_gettoken_shared(&pg->pg_token);
123 		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
124 			if (PRISON_CHECK(curtd->td_ucred, p->p_ucred) &&
125 			    p->p_nice < low) {
126 				low = p->p_nice;
127 			}
128 		}
129 		lwkt_reltoken(&pg->pg_token);
130 		pgrel(pg);
131 		break;
132 	case PRIO_USER:
133 		if (who == 0)
134 			who = curtd->td_ucred->cr_uid;
135 		info.low = low;
136 		info.who = who;
137 		allproc_scan(getpriority_callback, &info, 0);
138 		low = info.low;
139 		break;
140 
141 	default:
142 		error = EINVAL;
143 		goto done;
144 	}
145 	if (low == PRIO_MAX + 1) {
146 		error = ESRCH;
147 		goto done;
148 	}
149 	sysmsg->sysmsg_result = low;
150 	error = 0;
151 done:
152 	return (error);
153 }
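
/*
 * Example (illustrative only, not compiled as part of this file): a
 * minimal userland use of getpriority(2) as implemented above.  Because
 * -1 is a legal nice value, a caller must clear errno to tell it apart
 * from the ESRCH/EINVAL failures returned by sys_getpriority().
 */
#if 0
#include <sys/resource.h>
#include <errno.h>
#include <stdio.h>

int
main(void)
{
	int nice_val;

	errno = 0;				/* -1 is a valid nice value */
	nice_val = getpriority(PRIO_PROCESS, 0);	/* 0 == this process */
	if (nice_val == -1 && errno != 0)
		perror("getpriority");
	else
		printf("current nice: %d\n", nice_val);
	return 0;
}
#endif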
154 
155 /*
156  * Figure out the current lowest nice priority for processes owned
157  * by the specified user.
158  */
159 static
160 int
161 getpriority_callback(struct proc *p, void *data)
162 {
163 	struct getpriority_info *info = data;
164 
165 	lwkt_gettoken_shared(&p->p_token);
166 	if (PRISON_CHECK(curthread->td_ucred, p->p_ucred) &&
167 	    p->p_ucred->cr_uid == info->who &&
168 	    p->p_nice < info->low) {
169 		info->low = p->p_nice;
170 	}
171 	lwkt_reltoken(&p->p_token);
172 	return(0);
173 }
174 
175 struct setpriority_info {
176 	int prio;
177 	int who;
178 	int error;
179 	int found;
180 };
181 
182 static int setpriority_callback(struct proc *p, void *data);
183 
184 /*
185  * MPALMOSTSAFE
186  */
187 int
188 sys_setpriority(struct sysmsg *sysmsg, const struct setpriority_args *uap)
189 {
190 	struct setpriority_info info;
191 	thread_t curtd = curthread;
192 	struct proc *curp = curproc;
193 	struct proc *p;
194 	struct pgrp *pg;
195 	int found = 0, error = 0;
196 	int who = uap->who;
197 
198 	switch (uap->which) {
199 	case PRIO_PROCESS:
200 		if (who == 0) {
201 			lwkt_gettoken(&curp->p_token);
202 			error = donice(curp, uap->prio);
203 			found++;
204 			lwkt_reltoken(&curp->p_token);
205 		} else {
206 			p = pfind(who);
207 			if (p) {
208 				lwkt_gettoken(&p->p_token);
209 				if (PRISON_CHECK(curtd->td_ucred, p->p_ucred)) {
210 					error = donice(p, uap->prio);
211 					found++;
212 				}
213 				lwkt_reltoken(&p->p_token);
214 				PRELE(p);
215 			}
216 		}
217 		break;
218 	case PRIO_PGRP:
219 		if (who == 0) {
220 			lwkt_gettoken_shared(&curp->p_token);
221 			pg = curp->p_pgrp;
222 			pgref(pg);
223 			lwkt_reltoken(&curp->p_token);
224 		} else if ((pg = pgfind(who)) == NULL) {
225 			break;
226 		} /* else ref held from pgfind */
227 
228 		lwkt_gettoken(&pg->pg_token);
229 restart:
230 		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
231 			PHOLD(p);
232 			lwkt_gettoken(&p->p_token);
233 			if (p->p_pgrp == pg &&
234 			    PRISON_CHECK(curtd->td_ucred, p->p_ucred)) {
235 				error = donice(p, uap->prio);
236 				found++;
237 			}
238 			lwkt_reltoken(&p->p_token);
239 			if (p->p_pgrp != pg) {
240 				PRELE(p);
241 				goto restart;
242 			}
243 			PRELE(p);
244 		}
245 		lwkt_reltoken(&pg->pg_token);
246 		pgrel(pg);
247 		break;
248 	case PRIO_USER:
249 		if (who == 0)
250 			who = curtd->td_ucred->cr_uid;
251 		info.prio = uap->prio;
252 		info.who = who;
253 		info.error = 0;
254 		info.found = 0;
255 		allproc_scan(setpriority_callback, &info, 0);
256 		error = info.error;
257 		found = info.found;
258 		break;
259 	default:
260 		error = EINVAL;
261 		found = 1;
262 		break;
263 	}
264 
265 	if (found == 0)
266 		error = ESRCH;
267 	return (error);
268 }
269 
270 static
271 int
272 setpriority_callback(struct proc *p, void *data)
273 {
274 	struct setpriority_info *info = data;
275 	int error;
276 
277 	lwkt_gettoken(&p->p_token);
278 	if (p->p_ucred->cr_uid == info->who &&
279 	    PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
280 		error = donice(p, info->prio);
281 		if (error)
282 			info->error = error;
283 		++info->found;
284 	}
285 	lwkt_reltoken(&p->p_token);
286 	return(0);
287 }
288 
289 /*
290  * Caller must hold chgp->p_token
291  */
292 static int
293 donice(struct proc *chgp, int n)
294 {
295 	struct ucred *cr = curthread->td_ucred;
296 	struct lwp *lp;
297 
298 	if (cr->cr_uid && cr->cr_ruid &&
299 	    cr->cr_uid != chgp->p_ucred->cr_uid &&
300 	    cr->cr_ruid != chgp->p_ucred->cr_uid)
301 		return (EPERM);
302 	if (n > PRIO_MAX)
303 		n = PRIO_MAX;
304 	if (n < PRIO_MIN)
305 		n = PRIO_MIN;
306 	if (n < chgp->p_nice && priv_check_cred(cr, PRIV_SCHED_SETPRIORITY, 0))
307 		return (EACCES);
308 	chgp->p_nice = n;
309 	FOREACH_LWP_IN_PROC(lp, chgp) {
310 		LWPHOLD(lp);
311 		chgp->p_usched->resetpriority(lp);
312 		LWPRELE(lp);
313 	}
314 	return (0);
315 }
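
/*
 * Example (illustrative only): the permission rules donice() enforces,
 * seen from userland.  Raising the nice value of one's own process is
 * always allowed; lowering it below the current value requires
 * PRIV_SCHED_SETPRIORITY (effectively root) and otherwise fails with
 * EACCES.
 */
#if 0
#include <sys/resource.h>
#include <errno.h>
#include <stdio.h>

int
main(void)
{
	/* Raising nice on our own process always works. */
	if (setpriority(PRIO_PROCESS, 0, 10) == -1)
		perror("setpriority(+10)");

	/* Lowering it back requires privilege; expect EACCES otherwise. */
	if (setpriority(PRIO_PROCESS, 0, 0) == -1 && errno == EACCES)
		printf("lowering nice denied, as donice() dictates\n");
	return 0;
}
#endif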
316 
317 
318 struct ioprio_get_info {
319 	int high;
320 	int who;
321 };
322 
323 static int ioprio_get_callback(struct proc *p, void *data);
324 
325 /*
326  * MPALMOSTSAFE
327  */
328 int
329 sys_ioprio_get(struct sysmsg *sysmsg, const struct ioprio_get_args *uap)
330 {
331 	struct ioprio_get_info info;
332 	thread_t curtd = curthread;
333 	struct proc *curp = curproc;
334 	struct proc *p;
335 	struct pgrp *pg;
336 	int high = IOPRIO_MIN-2;
337 	int who = uap->who;
338 	int error;
339 
340 	switch (uap->which) {
341 	case PRIO_PROCESS:
342 		if (who == 0) {
343 			high = curp->p_ionice;
344 		} else {
345 			p = pfind(who);
346 			if (p) {
347 				lwkt_gettoken_shared(&p->p_token);
348 				if (PRISON_CHECK(curtd->td_ucred, p->p_ucred))
349 					high = p->p_ionice;
350 				lwkt_reltoken(&p->p_token);
351 				PRELE(p);
352 			}
353 		}
354 		break;
355 	case PRIO_PGRP:
356 		if (who == 0) {
357 			lwkt_gettoken_shared(&curp->p_token);
358 			pg = curp->p_pgrp;
359 			pgref(pg);
360 			lwkt_reltoken(&curp->p_token);
361 		} else if ((pg = pgfind(who)) == NULL) {
362 			break;
363 		} /* else ref held from pgfind */
364 
365 		lwkt_gettoken_shared(&pg->pg_token);
366 		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
367 			if (PRISON_CHECK(curtd->td_ucred, p->p_ucred) &&
368 			    p->p_ionice > high)
369 				high = p->p_ionice;
370 		}
371 		lwkt_reltoken(&pg->pg_token);
372 		pgrel(pg);
373 		break;
374 	case PRIO_USER:
375 		if (who == 0)
376 			who = curtd->td_ucred->cr_uid;
377 		info.high = high;
378 		info.who = who;
379 		allproc_scan(ioprio_get_callback, &info, 0);
380 		high = info.high;
381 		break;
382 	default:
383 		error = EINVAL;
384 		goto done;
385 	}
386 	if (high == IOPRIO_MIN-2) {
387 		error = ESRCH;
388 		goto done;
389 	}
390 	sysmsg->sysmsg_result = high;
391 	error = 0;
392 done:
393 	return (error);
394 }
395 
396 /*
397  * Figure out the current highest I/O priority for processes owned
398  * by the specified user.
399  */
400 static
401 int
402 ioprio_get_callback(struct proc *p, void *data)
403 {
404 	struct ioprio_get_info *info = data;
405 
406 	lwkt_gettoken_shared(&p->p_token);
407 	if (PRISON_CHECK(curthread->td_ucred, p->p_ucred) &&
408 	    p->p_ucred->cr_uid == info->who &&
409 	    p->p_ionice > info->high) {
410 		info->high = p->p_ionice;
411 	}
412 	lwkt_reltoken(&p->p_token);
413 	return(0);
414 }
415 
416 
417 struct ioprio_set_info {
418 	int prio;
419 	int who;
420 	int error;
421 	int found;
422 };
423 
424 static int ioprio_set_callback(struct proc *p, void *data);
425 
426 /*
427  * MPALMOSTSAFE
428  */
429 int
430 sys_ioprio_set(struct sysmsg *sysmsg, const struct ioprio_set_args *uap)
431 {
432 	struct ioprio_set_info info;
433 	thread_t curtd = curthread;
434 	struct proc *curp = curproc;
435 	struct proc *p;
436 	struct pgrp *pg;
437 	int found = 0, error = 0;
438 	int who = uap->who;
439 
440 	switch (uap->which) {
441 	case PRIO_PROCESS:
442 		if (who == 0) {
443 			lwkt_gettoken(&curp->p_token);
444 			error = doionice(curp, uap->prio);
445 			lwkt_reltoken(&curp->p_token);
446 			found++;
447 		} else {
448 			p = pfind(who);
449 			if (p) {
450 				lwkt_gettoken(&p->p_token);
451 				if (PRISON_CHECK(curtd->td_ucred, p->p_ucred)) {
452 					error = doionice(p, uap->prio);
453 					found++;
454 				}
455 				lwkt_reltoken(&p->p_token);
456 				PRELE(p);
457 			}
458 		}
459 		break;
460 	case PRIO_PGRP:
461 		if (who == 0) {
462 			lwkt_gettoken_shared(&curp->p_token);
463 			pg = curp->p_pgrp;
464 			pgref(pg);
465 			lwkt_reltoken(&curp->p_token);
466 		} else if ((pg = pgfind(who)) == NULL) {
467 			break;
468 		} /* else ref held from pgfind */
469 
470 		lwkt_gettoken(&pg->pg_token);
471 restart:
472 		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
473 			PHOLD(p);
474 			lwkt_gettoken(&p->p_token);
475 			if (p->p_pgrp == pg &&
476 			    PRISON_CHECK(curtd->td_ucred, p->p_ucred)) {
477 				error = doionice(p, uap->prio);
478 				found++;
479 			}
480 			lwkt_reltoken(&p->p_token);
481 			if (p->p_pgrp != pg) {
482 				PRELE(p);
483 				goto restart;
484 			}
485 			PRELE(p);
486 		}
487 		lwkt_reltoken(&pg->pg_token);
488 		pgrel(pg);
489 		break;
490 	case PRIO_USER:
491 		if (who == 0)
492 			who = curtd->td_ucred->cr_uid;
493 		info.prio = uap->prio;
494 		info.who = who;
495 		info.error = 0;
496 		info.found = 0;
497 		allproc_scan(ioprio_set_callback, &info, 0);
498 		error = info.error;
499 		found = info.found;
500 		break;
501 	default:
502 		error = EINVAL;
503 		found = 1;
504 		break;
505 	}
506 
507 	if (found == 0)
508 		error = ESRCH;
509 	return (error);
510 }
511 
512 static
513 int
514 ioprio_set_callback(struct proc *p, void *data)
515 {
516 	struct ioprio_set_info *info = data;
517 	int error;
518 
519 	lwkt_gettoken(&p->p_token);
520 	if (p->p_ucred->cr_uid == info->who &&
521 	    PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
522 		error = doionice(p, info->prio);
523 		if (error)
524 			info->error = error;
525 		++info->found;
526 	}
527 	lwkt_reltoken(&p->p_token);
528 	return(0);
529 }
530 
531 static int
532 doionice(struct proc *chgp, int n)
533 {
534 	struct ucred *cr = curthread->td_ucred;
535 
536 	if (cr->cr_uid && cr->cr_ruid &&
537 	    cr->cr_uid != chgp->p_ucred->cr_uid &&
538 	    cr->cr_ruid != chgp->p_ucred->cr_uid)
539 		return (EPERM);
540 	if (n > IOPRIO_MAX)
541 		n = IOPRIO_MAX;
542 	if (n < IOPRIO_MIN)
543 		n = IOPRIO_MIN;
544 	if (n < chgp->p_ionice &&
545 	    priv_check_cred(cr, PRIV_SCHED_SETPRIORITY, 0))
546 		return (EACCES);
547 	chgp->p_ionice = n;
548 
549 	return (0);
550 
551 }
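
/*
 * Example (illustrative only): the I/O-priority analogue of nice.  This
 * sketch assumes the DragonFly libc wrappers ioprio_get()/ioprio_set()
 * and the IOPRIO_MIN/IOPRIO_MAX constants are visible via
 * <sys/resource.h>; verify against your headers before relying on it.
 */
#if 0
#include <sys/resource.h>
#include <stdio.h>

int
main(void)
{
	/* Request the highest I/O priority for this process... */
	if (ioprio_set(PRIO_PROCESS, 0, IOPRIO_MAX) == -1)
		perror("ioprio_set");

	/* ...and read it back; doionice() clamps to IOPRIO_MIN..IOPRIO_MAX. */
	printf("ionice: %d\n", ioprio_get(PRIO_PROCESS, 0));
	return 0;
}
#endif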
552 
553 /*
554  * MPALMOSTSAFE
555  */
556 int
557 sys_lwp_rtprio(struct sysmsg *sysmsg, const struct lwp_rtprio_args *uap)
558 {
559 	struct ucred *cr = curthread->td_ucred;
560 	struct proc *p;
561 	struct lwp *lp;
562 	struct rtprio rtp;
563 	int error;
564 
565 	error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
566 	if (error)
567 		return error;
568 	if (uap->pid < 0)
569 		return EINVAL;
570 
571 	if (uap->pid == 0) {
572 		p = curproc;
573 		PHOLD(p);
574 	} else {
575 		p = pfind(uap->pid);
576 	}
577 	if (p == NULL) {
578 		error = ESRCH;
579 		goto done;
580 	}
581 	lwkt_gettoken(&p->p_token);
582 
583 	if (uap->tid < -1) {
584 		error = EINVAL;
585 		goto done;
586 	}
587 	if (uap->tid == -1) {
588 		/*
589 		 * Sadly, a tid can legitimately be 0, so we cannot use 0
590 		 * the way sys_rtprio() does; -1 selects the calling thread.
591 		 */
592 		lp = curthread->td_lwp;
593 	} else {
594 		lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, uap->tid);
595 		if (lp == NULL) {
596 			error = ESRCH;
597 			goto done;
598 		}
599 	}
600 
601 	/*
602 	 * Make sure that this lwp is not ripped out from under us while
603 	 * any of the following code blocks, e.g. the copyout.
604 	 */
605 	LWPHOLD(lp);
606 	switch (uap->function) {
607 	case RTP_LOOKUP:
608 		error = copyout(&lp->lwp_rtprio, uap->rtp,
609 				sizeof(struct rtprio));
610 		break;
611 	case RTP_SET:
612 		if (cr->cr_uid && cr->cr_ruid &&
613 		    cr->cr_uid != p->p_ucred->cr_uid &&
614 		    cr->cr_ruid != p->p_ucred->cr_uid) {
615 			error = EPERM;
616 			break;
617 		}
618 		/* disallow setting rtprio in most cases if not superuser */
619 		if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
620 			/* can't set someone else's */
621 			if (uap->pid) { /* XXX */
622 				error = EPERM;
623 				break;
624 			}
625 			/* can't set realtime priority */
626 /*
627  * Realtime priority has to be restricted or an unprivileged process
628  * could starve the system. However, for idle priority, there is a
629  * potential for system deadlock if an idleprio process gains a lock
630  * on a resource that other processes need (and the idleprio process
631  * can't run due to a CPU-bound normal process). Fix me! XXX
632  */
633 			if (RTP_PRIO_IS_REALTIME(rtp.type)) {
634 				error = EPERM;
635 				break;
636 			}
637 		}
638 		switch (rtp.type) {
639 #ifdef RTP_PRIO_FIFO
640 		case RTP_PRIO_FIFO:
641 #endif
642 		case RTP_PRIO_REALTIME:
643 		case RTP_PRIO_NORMAL:
644 		case RTP_PRIO_IDLE:
645 			if (rtp.prio > RTP_PRIO_MAX) {
646 				error = EINVAL;
647 			} else {
648 				lp->lwp_rtprio = rtp;
649 				error = 0;
650 			}
651 			break;
652 		default:
653 			error = EINVAL;
654 			break;
655 		}
656 		break;
657 	default:
658 		error = EINVAL;
659 		break;
660 	}
661 	LWPRELE(lp);
662 
663 done:
664 	if (p) {
665 		lwkt_reltoken(&p->p_token);
666 		PRELE(p);
667 	}
668 	return (error);
669 }
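
/*
 * Example (illustrative only): per-thread lookup via lwp_rtprio().  A tid
 * of -1 names the calling thread since 0 can be a valid tid, unlike the
 * pid convention used by sys_rtprio().  The wrapper prototype is assumed
 * to come from <sys/rtprio.h> on DragonFly.
 */
#if 0
#include <sys/types.h>
#include <sys/rtprio.h>
#include <stdio.h>

int
main(void)
{
	struct rtprio rtp;

	/* pid 0 == this process, tid -1 == this thread */
	if (lwp_rtprio(RTP_LOOKUP, 0, -1, &rtp) == -1) {
		perror("lwp_rtprio");
		return 1;
	}
	printf("type=%hu prio=%hu\n", rtp.type, rtp.prio);
	return 0;
}
#endif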
670 
671 /*
672  * Set realtime priority
673  *
674  * MPALMOSTSAFE
675  */
676 int
677 sys_rtprio(struct sysmsg *sysmsg, const struct rtprio_args *uap)
678 {
679 	struct ucred *cr = curthread->td_ucred;
680 	struct proc *p;
681 	struct lwp *lp;
682 	struct rtprio rtp;
683 	int error;
684 
685 	error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
686 	if (error)
687 		return (error);
688 
689 	if (uap->pid == 0) {
690 		p = curproc;
691 		PHOLD(p);
692 	} else {
693 		p = pfind(uap->pid);
694 	}
695 
696 	if (p == NULL) {
697 		error = ESRCH;
698 		goto done;
699 	}
700 	lwkt_gettoken(&p->p_token);
701 
702 	/* XXX lwp */
703 	lp = FIRST_LWP_IN_PROC(p);
704 	switch (uap->function) {
705 	case RTP_LOOKUP:
706 		error = copyout(&lp->lwp_rtprio, uap->rtp,
707 				sizeof(struct rtprio));
708 		break;
709 	case RTP_SET:
710 		if (cr->cr_uid && cr->cr_ruid &&
711 		    cr->cr_uid != p->p_ucred->cr_uid &&
712 		    cr->cr_ruid != p->p_ucred->cr_uid) {
713 			error = EPERM;
714 			break;
715 		}
716 		/* disallow setting rtprio in most cases if not superuser */
717 		if (priv_check_cred(cr, PRIV_SCHED_RTPRIO, 0)) {
718 			/* can't set someone else's */
719 			if (uap->pid) {
720 				error = EPERM;
721 				break;
722 			}
723 			/* can't set realtime priority */
724 /*
725  * Realtime priority has to be restricted or an unprivileged process
726  * could starve the system. However, for idle priority, there is a
727  * potential for system deadlock if an idleprio process gains a lock
728  * on a resource that other processes need (and the idleprio process
729  * can't run due to a CPU-bound normal process). Fix me! XXX
730  */
731 			if (RTP_PRIO_IS_REALTIME(rtp.type)) {
732 				error = EPERM;
733 				break;
734 			}
735 		}
736 		switch (rtp.type) {
737 #ifdef RTP_PRIO_FIFO
738 		case RTP_PRIO_FIFO:
739 #endif
740 		case RTP_PRIO_REALTIME:
741 		case RTP_PRIO_NORMAL:
742 		case RTP_PRIO_IDLE:
743 			if (rtp.prio > RTP_PRIO_MAX) {
744 				error = EINVAL;
745 				break;
746 			}
747 			lp->lwp_rtprio = rtp;
748 			error = 0;
749 			break;
750 		default:
751 			error = EINVAL;
752 			break;
753 		}
754 		break;
755 	default:
756 		error = EINVAL;
757 		break;
758 	}
759 done:
760 	if (p) {
761 		lwkt_reltoken(&p->p_token);
762 		PRELE(p);
763 	}
764 
765 	return (error);
766 }
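
/*
 * Example (illustrative only): moving the calling process into the idle
 * scheduling class with rtprio(2).  RTP_PRIO_IDLE does not require
 * privilege for one's own process, whereas RTP_PRIO_REALTIME needs
 * PRIV_SCHED_RTPRIO, exactly as enforced above.
 */
#if 0
#include <sys/types.h>
#include <sys/rtprio.h>
#include <stdio.h>

int
main(void)
{
	struct rtprio rtp;

	rtp.type = RTP_PRIO_IDLE;
	rtp.prio = RTP_PRIO_MAX;		/* least favorable idle slot */
	if (rtprio(RTP_SET, 0, &rtp) == -1) {	/* pid 0 == this process */
		perror("rtprio");
		return 1;
	}
	return 0;
}
#endif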
767 
768 int
769 sys_setrlimit(struct sysmsg *sysmsg, const struct __setrlimit_args *uap)
770 {
771 	struct rlimit alim;
772 	int error;
773 
774 	error = copyin(uap->rlp, &alim, sizeof(alim));
775 	if (error)
776 		return (error);
777 
778 	error = kern_setrlimit(uap->which, &alim);
779 
780 	return (error);
781 }
782 
783 int
784 sys_getrlimit(struct sysmsg *sysmsg, const struct __getrlimit_args *uap)
785 {
786 	struct rlimit lim;
787 	int error;
788 
789 	error = kern_getrlimit(uap->which, &lim);
790 
791 	if (error == 0)
792 		error = copyout(&lim, uap->rlp, sizeof(*uap->rlp));
793 	return error;
794 }
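
/*
 * Example (illustrative only): the usual get-modify-set pattern over
 * these two syscalls, raising the soft descriptor limit to the hard cap.
 * Raising the hard limit itself is refused without privilege.
 */
#if 0
#include <sys/resource.h>
#include <stdio.h>

int
main(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_NOFILE, &rl) == -1) {
		perror("getrlimit");
		return 1;
	}
	rl.rlim_cur = rl.rlim_max;	/* soft limit up to the hard cap */
	if (setrlimit(RLIMIT_NOFILE, &rl) == -1) {
		perror("setrlimit");
		return 1;
	}
	return 0;
}
#endif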
795 
796 /*
797  * Transform the running time and tick information in lwp lp's thread
798  * into user and system time usage.
799  *
800  * Since we are limited to statclock tick granularity this is a
801  * statistical calculation which will be correct over the long haul,
802  * but should not be expected to measure fine-grained deltas.
803  *
804  * It is possible to catch an lwp in the midst of being created, so
805  * check whether lwp_thread is NULL or not.
806  */
807 void
808 calcru(struct lwp *lp, struct timeval *up, struct timeval *sp)
809 {
810 	struct thread *td;
811 
812 	/*
813 	 * Calculate at the statclock level.  YYY if the thread is owned by
814 	 * another cpu we need to forward the request to the other cpu, or
815 	 * have a token to interlock the information in order to avoid racing
816 	 * thread destruction.
817 	 */
818 	if ((td = lp->lwp_thread) != NULL) {
819 		crit_enter();
820 		up->tv_sec = td->td_uticks / 1000000;
821 		up->tv_usec = td->td_uticks % 1000000;
822 		sp->tv_sec = td->td_sticks / 1000000;
823 		sp->tv_usec = td->td_sticks % 1000000;
824 		crit_exit();
825 	}
826 }
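
/*
 * Example (illustrative only): the tick-to-timeval split performed above,
 * assuming td_uticks/td_sticks accumulate microseconds.  2,500,000 ticks
 * split into 2 seconds and 500,000 microseconds.
 */
#if 0
#include <sys/time.h>
#include <stdio.h>

int
main(void)
{
	unsigned long long uticks = 2500000ULL;	/* sample accumulated ticks */
	struct timeval up;

	up.tv_sec = uticks / 1000000;
	up.tv_usec = uticks % 1000000;
	printf("%llu ticks -> %ld.%06ld s\n", uticks,
	    (long)up.tv_sec, (long)up.tv_usec);
	return 0;
}
#endif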
827 
828 /*
829  * Aggregate resource statistics of all lwps of a process.
830  *
831  * proc.p_ru keeps track of all statistics directly related to a proc.  This
832  * consists of RSS usage and nswap information and aggregate numbers for all
833  * former lwps of this proc.
834  *
835  * proc.p_cru is the sum of all stats of reaped children.
836  *
837  * lwp.lwp_ru contains the stats directly related to one specific lwp, meaning
838  * packet, scheduler switch or page fault counts, etc.  This information gets
839  * added to lwp.lwp_proc.p_ru when the lwp exits.
840  */
841 void
842 calcru_proc(struct proc *p, struct rusage *ru)
843 {
844 	struct timeval upt, spt;
845 	long *rip1, *rip2;
846 	struct lwp *lp;
847 
848 	*ru = p->p_ru;
849 
850 	FOREACH_LWP_IN_PROC(lp, p) {
851 		calcru(lp, &upt, &spt);
852 		timevaladd(&ru->ru_utime, &upt);
853 		timevaladd(&ru->ru_stime, &spt);
854 		for (rip1 = &ru->ru_first, rip2 = &lp->lwp_ru.ru_first;
855 		     rip1 <= &ru->ru_last;
856 		     rip1++, rip2++)
857 			*rip1 += *rip2;
858 	}
859 }
860 
861 
862 /*
863  * MPALMOSTSAFE
864  */
865 int
866 sys_getrusage(struct sysmsg *sysmsg, const struct getrusage_args *uap)
867 {
868 	struct proc *p = curproc;
869 	struct rusage ru;
870 	struct rusage *rup;
871 	int error;
872 
873 	lwkt_gettoken(&p->p_token);
874 
875 	switch (uap->who) {
876 	case RUSAGE_SELF:
877 		rup = &ru;
878 		calcru_proc(p, rup);
879 		error = 0;
880 		break;
881 	case RUSAGE_CHILDREN:
882 		rup = &p->p_cru;
883 		error = 0;
884 		break;
885 	default:
886 		error = EINVAL;
887 		break;
888 	}
889 	lwkt_reltoken(&p->p_token);
890 
891 	if (error == 0)
892 		error = copyout(rup, uap->rusage, sizeof(struct rusage));
893 	return (error);
894 }
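
/*
 * Example (illustrative only): reading the aggregate statistics that
 * calcru_proc() produces.  RUSAGE_CHILDREN reports p_cru, i.e. only the
 * totals of children that have already been reaped.
 */
#if 0
#include <sys/time.h>
#include <sys/resource.h>
#include <stdio.h>

int
main(void)
{
	struct rusage ru;

	if (getrusage(RUSAGE_SELF, &ru) == -1) {
		perror("getrusage");
		return 1;
	}
	printf("user %ld.%06lds sys %ld.%06lds maxrss %ld\n",
	    (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec,
	    (long)ru.ru_stime.tv_sec, (long)ru.ru_stime.tv_usec,
	    ru.ru_maxrss);
	return 0;
}
#endif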
895 
896 void
897 ruadd(struct rusage *ru, struct rusage *ru2)
898 {
899 	long *ip, *ip2;
900 	int i;
901 
902 	timevaladd(&ru->ru_utime, &ru2->ru_utime);
903 	timevaladd(&ru->ru_stime, &ru2->ru_stime);
904 	if (ru->ru_maxrss < ru2->ru_maxrss)
905 		ru->ru_maxrss = ru2->ru_maxrss;
906 	ip = &ru->ru_first; ip2 = &ru2->ru_first;
907 	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
908 		*ip++ += *ip2++;
909 }
910 
911 /*
912  * Find the uidinfo structure for a uid.  This structure is used to
913  * track the total resource consumption (process count, socket buffer
914  * size, etc.) for the uid and impose limits.
915  */
916 void
917 uihashinit(void)
918 {
919 	spin_init(&uihash_lock, "uihashinit");
920 	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
921 }
922 
923 /*
924  * NOTE: Must be called with uihash_lock held
925  */
926 static struct uidinfo *
927 uilookup(uid_t uid)
928 {
929 	struct	uihashhead *uipp;
930 	struct	uidinfo *uip;
931 
932 	uipp = UIHASH(uid);
933 	LIST_FOREACH(uip, uipp, ui_hash) {
934 		if (uip->ui_uid == uid)
935 			break;
936 	}
937 	return (uip);
938 }
939 
940 /*
941  * Helper function to create a uidinfo for a uid that could not be found.
942  * This function will properly deal with races.
943  *
944  * WARNING! Should only be used by this source file and by the proc0
945  *	    creation code.
946  */
947 struct uidinfo *
948 uicreate(uid_t uid)
949 {
950 	struct	uidinfo *uip, *tmp;
951 
952 	/*
953 	 * Allocate space optimistically; the race is checked for below
954 	 */
955 	uip = kmalloc(sizeof(*uip), M_UIDINFO, M_WAITOK | M_ZERO);
956 
957 	/*
958 	 * Initialize structure and enter it into the hash table
959 	 */
960 	spin_init(&uip->ui_lock, "uicreate");
961 	uip->ui_uid = uid;
962 	uip->ui_ref = 1;	/* we're returning a ref */
963 	varsymset_init(&uip->ui_varsymset, NULL);
964 	uip->ui_pcpu = kmalloc(sizeof(*uip->ui_pcpu) * ncpus,
965 			       M_UIDINFO, M_WAITOK | M_ZERO);
966 
967 	/*
968 	 * Somebody may have already created the uidinfo for this
969 	 * uid. If so, return that instead.
970 	 */
971 	spin_lock(&uihash_lock);
972 	tmp = uilookup(uid);
973 	if (tmp != NULL) {
974 		uihold(tmp);
975 		spin_unlock(&uihash_lock);
976 
977 		spin_uninit(&uip->ui_lock);
978 		varsymset_clean(&uip->ui_varsymset);
979 		kfree(uip->ui_pcpu, M_UIDINFO);
980 		kfree(uip, M_UIDINFO);
981 		uip = tmp;
982 	} else {
983 		LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
984 		spin_unlock(&uihash_lock);
985 	}
986 	return (uip);
987 }
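
/*
 * Example (illustrative only): the optimistic-allocation pattern used by
 * uicreate(), reduced to a generic userland sketch with a pthread mutex.
 * Allocation happens outside the lock; a loser of the insertion race
 * frees its copy and adopts the winner's.  All names are hypothetical.
 */
#if 0
#include <pthread.h>
#include <stdlib.h>

struct node { int key; struct node *next; };

static struct node *head;
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static struct node *
lookup_locked(int key)			/* caller must hold table_lock */
{
	struct node *n;

	for (n = head; n != NULL; n = n->next)
		if (n->key == key)
			return n;
	return NULL;
}

static struct node *
find_or_create(int key)
{
	struct node *n, *tmp;

	/* Allocate optimistically, without holding the lock. */
	n = calloc(1, sizeof(*n));	/* error handling elided */
	n->key = key;

	pthread_mutex_lock(&table_lock);
	tmp = lookup_locked(key);
	if (tmp != NULL) {
		/* Lost the race: discard ours, return the existing entry. */
		pthread_mutex_unlock(&table_lock);
		free(n);
		return tmp;
	}
	n->next = head;			/* won the race: publish our entry */
	head = n;
	pthread_mutex_unlock(&table_lock);
	return n;
}
#endif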
988 
989 /*
990  * Find the uidinfo for a uid, creating one if necessary
991  */
992 struct uidinfo *
993 uifind(uid_t uid)
994 {
995 	struct uidinfo *uip;
996 	thread_t td = curthread;
997 
998 	if (td->td_ucred) {
999 		uip = td->td_ucred->cr_uidinfo;
1000 		if (uip->ui_uid == uid) {
1001 			uihold(uip);
1002 			return uip;
1003 		}
1004 		uip = td->td_ucred->cr_ruidinfo;
1005 		if (uip->ui_uid == uid) {
1006 			uihold(uip);
1007 			return uip;
1008 		}
1009 	}
1010 
1011 	spin_lock_shared(&uihash_lock);
1012 	uip = uilookup(uid);
1013 	if (uip == NULL) {
1014 		spin_unlock_shared(&uihash_lock);
1015 		uip = uicreate(uid);
1016 	} else {
1017 		uihold(uip);
1018 		spin_unlock_shared(&uihash_lock);
1019 	}
1020 	return (uip);
1021 }
1022 
1023 /*
1024  * Helper function to remove a uidinfo whose reference count may
1025  * have transitioned to 0.  The reference count is likely 0
1026  * at the time of the call.
1027  */
1028 static __inline void
1029 uifree(uid_t uid)
1030 {
1031 	struct uidinfo *uip;
1032 
1033 	/*
1034 	 * If we are still the only holder after acquiring the uihash_lock
1035 	 * we can safely unlink the uip and destroy it.  Otherwise we lost
1036 	 * a race and must fail.
1037 	 */
1038 	spin_lock(&uihash_lock);
1039 	uip = uilookup(uid);
1040 	if (uip && uip->ui_ref == 0) {
1041 		LIST_REMOVE(uip, ui_hash);
1042 		spin_unlock(&uihash_lock);
1043 
1044 		/*
1045 		 * The uip is now orphaned and we can destroy it at our
1046 		 * leisure.
1047 		 */
1048 		if (uip->ui_sbsize != 0)
1049 			kprintf("freeing uidinfo: uid = %d, sbsize = %jd\n",
1050 			    uip->ui_uid, (intmax_t)uip->ui_sbsize);
1051 		if (uip->ui_proccnt != 0)
1052 			kprintf("freeing uidinfo: uid = %d, proccnt = %ld\n",
1053 			    uip->ui_uid, uip->ui_proccnt);
1054 
1055 		varsymset_clean(&uip->ui_varsymset);
1056 		lockuninit(&uip->ui_varsymset.vx_lock);
1057 		spin_uninit(&uip->ui_lock);
1058 		kfree(uip->ui_pcpu, M_UIDINFO);
1059 		kfree(uip, M_UIDINFO);
1060 	} else {
1061 		spin_unlock(&uihash_lock);
1062 	}
1063 }
1064 
1065 /*
1066  * Bump the ref count
1067  */
1068 void
1069 uihold(struct uidinfo *uip)
1070 {
1071 	KKASSERT(uip->ui_ref >= 0);
1072 	atomic_add_int(&uip->ui_ref, 1);
1073 }
1074 
1075 /*
1076  * Drop the ref count.  The last-drop code still needs to remove the
1077  * uidinfo from the hash table which it does by re-looking-it-up.
1078  *
1079  * NOTE: The uip can be ripped out from under us after the fetchadd.
1080  */
1081 void
1082 uidrop(struct uidinfo *uip)
1083 {
1084 	uid_t uid;
1085 
1086 	KKASSERT(uip->ui_ref > 0);
1087 	uid = uip->ui_uid;
1088 	cpu_ccfence();
1089 	if (atomic_fetchadd_int(&uip->ui_ref, -1) == 1) {
1090 		uifree(uid);
1091 	}
1092 }
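
/*
 * Example (illustrative only): the drop-then-reap pattern of
 * uidrop()/uifree() expressed with C11 atomics.  The dropper learns it
 * held the last reference from the fetch-and-subtract return value, then
 * must re-check under the table lock before destroying, because a
 * concurrent lookup may have revived the entry in the meantime.
 */
#if 0
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct obj { atomic_int ref; };

static struct obj *table_slot;		/* a one-slot stand-in for the hash */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static void
obj_drop(struct obj *o)
{
	/* fetch_sub returns the old value: 1 == we held the last ref */
	if (atomic_fetch_sub(&o->ref, 1) != 1)
		return;

	/*
	 * Re-lookup under the lock; the object may have been revived (or
	 * already reaped) by another thread after our fetch_sub.
	 */
	pthread_mutex_lock(&table_lock);
	o = table_slot;
	if (o != NULL && atomic_load(&o->ref) == 0) {
		table_slot = NULL;	/* unlink first, then destroy */
		pthread_mutex_unlock(&table_lock);
		free(o);
	} else {
		pthread_mutex_unlock(&table_lock);
	}
}
#endif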
1093 
1094 void
1095 uireplace(struct uidinfo **puip, struct uidinfo *nuip)
1096 {
1097 	uidrop(*puip);
1098 	*puip = nuip;
1099 }
1100 
1101 /*
1102  * Change the count associated with number of processes
1103  * a given user is using.
1104  *
1105  * NOTE: When 'max' is 0, don't enforce a limit.
1106  *
1107  * NOTE: Due to concurrency, the count can sometimes exceed the max
1108  *	 by a small amount.
1109  */
1110 int
1111 chgproccnt(struct uidinfo *uip, int diff, int max)
1112 {
1113 	int ret;
1114 
1115 	/* don't allow them to exceed max, but allow subtraction */
1116 	if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
1117 		ret = 0;
1118 	} else {
1119 		atomic_add_long(&uip->ui_proccnt, diff);
1120 		if (uip->ui_proccnt < 0)
1121 			kprintf("negative proccnt for uid = %d\n", uip->ui_uid);
1122 		ret = 1;
1123 	}
1124 	return ret;
1125 }
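
/*
 * Example (illustrative only): chgproccnt() is what ultimately turns a
 * per-uid process limit into a fork failure.  This sketch provokes that
 * path by shrinking RLIMIT_NPROC before forking; run it as a non-root
 * user and the fork fails with EAGAIN.
 */
#if 0
#include <sys/resource.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct rlimit rl = { 1, 1 };	/* allow ~one process for this uid */

	if (setrlimit(RLIMIT_NPROC, &rl) == -1)
		perror("setrlimit");
	if (fork() == -1 && errno == EAGAIN)
		printf("fork denied: per-uid process limit reached\n");
	return 0;
}
#endif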
1126 
1127 /*
1128  * Change the total socket buffer size a user has used.
1129  */
1130 int
1131 chgsbsize(struct uidinfo *uip, u_long *hiwat, u_long to, rlim_t max)
1132 {
1133 	rlim_t new;
1134 	rlim_t sbsize;
1135 
1136 
1137 	sbsize = atomic_fetchadd_long(&uip->ui_sbsize, to - *hiwat);
1138 	new = sbsize + to - *hiwat;
1139 	KKASSERT(new >= 0);
1140 
1141 	 * If we are trying to increase the socket buffer size, scale
1142 	 * down the hi water mark when we exceed the user's allowed
1143 	 * socket buffer space.
1144 	 * allowed socket buffer space.
1145 	 *
1146 	 * We can't scale down too much or we will blow up atomic packet
1147 	 * operations.
1148 	 */
1149 	if (to > *hiwat && to > MCLBYTES && new > max) {
1150 		to = to * max / new;
1151 		if (to < MCLBYTES)
1152 			to = MCLBYTES;
1153 	}
1154 	*hiwat = to;
1155 	return (1);
1156 }
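
/*
 * Example (illustrative only): the hiwat scaling arithmetic above with
 * sample numbers.  If a user capped at max = 256KB asks to grow a buffer
 * to to = 128KB while total usage would become new = 512KB, the request
 * is scaled to to * max / new = 64KB, and never below MCLBYTES.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	long long max = 262144;		/* per-uid sbsize allowance */
	long long to = 131072;		/* requested hi water mark */
	long long new = 524288;		/* total usage if granted as-is */
	long long mclbytes = 2048;	/* a typical MCLBYTES */

	if (new > max) {
		to = to * max / new;
		if (to < mclbytes)
			to = mclbytes;
	}
	printf("scaled hiwat: %lld\n", to);	/* prints 65536 */
	return 0;
}
#endif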
1157