/* xref: /dragonfly/sys/kern/kern_resource.c (revision 2b3f93ea) */
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_resource.c,v 1.55.2.5 2001/11/03 01:41:08 ps Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmsg.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/caps.h>
#include <sys/time.h>
#include <sys/lockf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

static int donice (struct proc *chgp, int n);
static int doionice (struct proc *chgp, int n);

static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct spinlock uihash_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static struct uidinfo	*uilookup (uid_t uid);

/*
 * Resource controls and accounting.
 */

struct getpriority_info {
	int low;
	int who;
};

static int getpriority_callback(struct proc *p, void *data);

/*
 * MPALMOSTSAFE
 */
int
sys_getpriority(struct sysmsg *sysmsg, const struct getpriority_args *uap)
{
	struct getpriority_info info;
	thread_t curtd = curthread;
	struct proc *curp = curproc;
	struct proc *p;
	struct pgrp *pg;
	int low = PRIO_MAX + 1;
	int who = uap->who;
	int error;

	switch (uap->which) {
	case PRIO_PROCESS:
		if (who == 0) {
			low = curp->p_nice;
		} else {
			p = pfind(who);
			if (p) {
				lwkt_gettoken_shared(&p->p_token);
				if (PRISON_CHECK(curtd->td_ucred, p->p_ucred))
					low = p->p_nice;
				lwkt_reltoken(&p->p_token);
				PRELE(p);
			}
		}
		break;
	case PRIO_PGRP:
		if (who == 0) {
			lwkt_gettoken_shared(&curp->p_token);
			pg = curp->p_pgrp;
			pgref(pg);
			lwkt_reltoken(&curp->p_token);
		} else if ((pg = pgfind(who)) == NULL) {
			break;
		} /* else ref held from pgfind */

		lwkt_gettoken_shared(&pg->pg_token);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (PRISON_CHECK(curtd->td_ucred, p->p_ucred) &&
			    p->p_nice < low) {
				low = p->p_nice;
			}
		}
		lwkt_reltoken(&pg->pg_token);
		pgrel(pg);
		break;
	case PRIO_USER:
		if (who == 0)
			who = curtd->td_ucred->cr_uid;
		info.low = low;
		info.who = who;
		allproc_scan(getpriority_callback, &info, 0);
		low = info.low;
		break;

	default:
		error = EINVAL;
		goto done;
	}
	if (low == PRIO_MAX + 1) {
		error = ESRCH;
		goto done;
	}
	sysmsg->sysmsg_result = low;
	error = 0;
done:
	return (error);
}
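
/*
 * Illustrative userland usage (not part of this file): getpriority(2)
 * legitimately returns -1 for a nice value of -1, so callers must clear
 * errno before the call to distinguish that from an error return.
 *
 *	#include <sys/resource.h>
 *	#include <errno.h>
 *	#include <err.h>
 *
 *	errno = 0;
 *	int prio = getpriority(PRIO_PROCESS, 0);	// 0 == calling process
 *	if (prio == -1 && errno != 0)
 *		err(1, "getpriority");			// ESRCH, EINVAL, ...
 */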

/*
 * Figure out the current lowest nice priority for processes owned
 * by the specified user.
 */
static
int
getpriority_callback(struct proc *p, void *data)
{
	struct getpriority_info *info = data;

	lwkt_gettoken_shared(&p->p_token);
	if (PRISON_CHECK(curthread->td_ucred, p->p_ucred) &&
	    p->p_ucred->cr_uid == info->who &&
	    p->p_nice < info->low) {
		info->low = p->p_nice;
	}
	lwkt_reltoken(&p->p_token);
	return(0);
}

struct setpriority_info {
	int prio;
	int who;
	int error;
	int found;
};

static int setpriority_callback(struct proc *p, void *data);

/*
 * MPALMOSTSAFE
 */
int
sys_setpriority(struct sysmsg *sysmsg, const struct setpriority_args *uap)
{
	struct setpriority_info info;
	thread_t curtd = curthread;
	struct proc *curp = curproc;
	struct proc *p;
	struct pgrp *pg;
	int found = 0, error = 0;
	int who = uap->who;

	switch (uap->which) {
	case PRIO_PROCESS:
		if (who == 0) {
			lwkt_gettoken(&curp->p_token);
			error = donice(curp, uap->prio);
			found++;
			lwkt_reltoken(&curp->p_token);
		} else {
			p = pfind(who);
			if (p) {
				lwkt_gettoken(&p->p_token);
				if (PRISON_CHECK(curtd->td_ucred, p->p_ucred)) {
					error = donice(p, uap->prio);
					found++;
				}
				lwkt_reltoken(&p->p_token);
				PRELE(p);
			}
		}
		break;
	case PRIO_PGRP:
		if (who == 0) {
			lwkt_gettoken_shared(&curp->p_token);
			pg = curp->p_pgrp;
			pgref(pg);
			lwkt_reltoken(&curp->p_token);
		} else if ((pg = pgfind(who)) == NULL) {
			break;
		} /* else ref held from pgfind */

		lwkt_gettoken(&pg->pg_token);
restart:
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PHOLD(p);
			lwkt_gettoken(&p->p_token);
			if (p->p_pgrp == pg &&
			    PRISON_CHECK(curtd->td_ucred, p->p_ucred)) {
				error = donice(p, uap->prio);
				found++;
			}
			lwkt_reltoken(&p->p_token);
			if (p->p_pgrp != pg) {
				PRELE(p);
				goto restart;
			}
			PRELE(p);
		}
		lwkt_reltoken(&pg->pg_token);
		pgrel(pg);
		break;
	case PRIO_USER:
		if (who == 0)
			who = curtd->td_ucred->cr_uid;
		info.prio = uap->prio;
		info.who = who;
		info.error = 0;
		info.found = 0;
		allproc_scan(setpriority_callback, &info, 0);
		error = info.error;
		found = info.found;
		break;
	default:
		error = EINVAL;
		found = 1;
		break;
	}

	if (found == 0)
		error = ESRCH;
	return (error);
}
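
/*
 * Illustrative userland usage (not part of this file): renice an entire
 * process group.  ESRCH is returned only when no process matched at all;
 * a failure from donice() on an individual member may be reported even
 * though other members were changed.
 *
 *	#include <sys/resource.h>
 *	#include <err.h>
 *
 *	if (setpriority(PRIO_PGRP, pgid, 10) < 0)	// pgid: some pgrp id
 *		err(1, "setpriority");
 */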

static
int
setpriority_callback(struct proc *p, void *data)
{
	struct setpriority_info *info = data;
	int error;

	lwkt_gettoken(&p->p_token);
	if (p->p_ucred->cr_uid == info->who &&
	    PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
		error = donice(p, info->prio);
		if (error)
			info->error = error;
		++info->found;
	}
	lwkt_reltoken(&p->p_token);
	return(0);
}

/*
 * Caller must hold chgp->p_token
 */
static int
donice(struct proc *chgp, int n)
{
	struct ucred *cr = curthread->td_ucred;
	struct lwp *lp;

	if (cr->cr_uid && cr->cr_ruid &&
	    cr->cr_uid != chgp->p_ucred->cr_uid &&
	    cr->cr_ruid != chgp->p_ucred->cr_uid)
		return (EPERM);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < chgp->p_nice && caps_priv_check(cr, SYSCAP_NOSCHED))
		return (EACCES);
	chgp->p_nice = n;
	FOREACH_LWP_IN_PROC(lp, chgp) {
		LWPHOLD(lp);
		chgp->p_usched->resetpriority(lp);
		LWPRELE(lp);
	}
	return (0);
}


struct ioprio_get_info {
	int high;
	int who;
};

static int ioprio_get_callback(struct proc *p, void *data);

/*
 * MPALMOSTSAFE
 */
int
sys_ioprio_get(struct sysmsg *sysmsg, const struct ioprio_get_args *uap)
{
	struct ioprio_get_info info;
	thread_t curtd = curthread;
	struct proc *curp = curproc;
	struct proc *p;
	struct pgrp *pg;
	int high = IOPRIO_MIN-2;
	int who = uap->who;
	int error;

	switch (uap->which) {
	case PRIO_PROCESS:
		if (who == 0) {
			high = curp->p_ionice;
		} else {
			p = pfind(who);
			if (p) {
				lwkt_gettoken_shared(&p->p_token);
				if (PRISON_CHECK(curtd->td_ucred, p->p_ucred))
					high = p->p_ionice;
				lwkt_reltoken(&p->p_token);
				PRELE(p);
			}
		}
		break;
	case PRIO_PGRP:
		if (who == 0) {
			lwkt_gettoken_shared(&curp->p_token);
			pg = curp->p_pgrp;
			pgref(pg);
			lwkt_reltoken(&curp->p_token);
		} else if ((pg = pgfind(who)) == NULL) {
			break;
		} /* else ref held from pgfind */

		lwkt_gettoken_shared(&pg->pg_token);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (PRISON_CHECK(curtd->td_ucred, p->p_ucred) &&
			    p->p_ionice > high)
				high = p->p_ionice;
		}
		lwkt_reltoken(&pg->pg_token);
		pgrel(pg);
		break;
	case PRIO_USER:
		if (who == 0)
			who = curtd->td_ucred->cr_uid;
		info.high = high;
		info.who = who;
		allproc_scan(ioprio_get_callback, &info, 0);
		high = info.high;
		break;
	default:
		error = EINVAL;
		goto done;
	}
	if (high == IOPRIO_MIN-2) {
		error = ESRCH;
		goto done;
	}
	sysmsg->sysmsg_result = high;
	error = 0;
done:
	return (error);
}

/*
 * Figure out the current highest I/O priority for processes owned
 * by the specified user.
 */
static
int
ioprio_get_callback(struct proc *p, void *data)
{
	struct ioprio_get_info *info = data;

	lwkt_gettoken_shared(&p->p_token);
	if (PRISON_CHECK(curthread->td_ucred, p->p_ucred) &&
	    p->p_ucred->cr_uid == info->who &&
	    p->p_ionice > info->high) {
		info->high = p->p_ionice;
	}
	lwkt_reltoken(&p->p_token);
	return(0);
}


struct ioprio_set_info {
	int prio;
	int who;
	int error;
	int found;
};

static int ioprio_set_callback(struct proc *p, void *data);

/*
 * MPALMOSTSAFE
 */
int
sys_ioprio_set(struct sysmsg *sysmsg, const struct ioprio_set_args *uap)
{
	struct ioprio_set_info info;
	thread_t curtd = curthread;
	struct proc *curp = curproc;
	struct proc *p;
	struct pgrp *pg;
	int found = 0, error = 0;
	int who = uap->who;

	switch (uap->which) {
	case PRIO_PROCESS:
		if (who == 0) {
			lwkt_gettoken(&curp->p_token);
			error = doionice(curp, uap->prio);
			lwkt_reltoken(&curp->p_token);
			found++;
		} else {
			p = pfind(who);
			if (p) {
				lwkt_gettoken(&p->p_token);
				if (PRISON_CHECK(curtd->td_ucred, p->p_ucred)) {
					error = doionice(p, uap->prio);
					found++;
				}
				lwkt_reltoken(&p->p_token);
				PRELE(p);
			}
		}
		break;
	case PRIO_PGRP:
		if (who == 0) {
			lwkt_gettoken_shared(&curp->p_token);
			pg = curp->p_pgrp;
			pgref(pg);
			lwkt_reltoken(&curp->p_token);
		} else if ((pg = pgfind(who)) == NULL) {
			break;
		} /* else ref held from pgfind */

		lwkt_gettoken(&pg->pg_token);
restart:
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PHOLD(p);
			lwkt_gettoken(&p->p_token);
			if (p->p_pgrp == pg &&
			    PRISON_CHECK(curtd->td_ucred, p->p_ucred)) {
				error = doionice(p, uap->prio);
				found++;
			}
			lwkt_reltoken(&p->p_token);
			if (p->p_pgrp != pg) {
				PRELE(p);
				goto restart;
			}
			PRELE(p);
		}
		lwkt_reltoken(&pg->pg_token);
		pgrel(pg);
		break;
	case PRIO_USER:
		if (who == 0)
			who = curtd->td_ucred->cr_uid;
		info.prio = uap->prio;
		info.who = who;
		info.error = 0;
		info.found = 0;
		allproc_scan(ioprio_set_callback, &info, 0);
		error = info.error;
		found = info.found;
		break;
	default:
		error = EINVAL;
		found = 1;
		break;
	}

	if (found == 0)
		error = ESRCH;
	return (error);
}
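
/*
 * Illustrative userland usage (not part of this file), assuming a
 * DragonFly-specific ioprio_set(2) libc stub taking the same
 * (which, who, prio) arguments serviced above:
 *
 *	if (ioprio_set(PRIO_PROCESS, 0, IOPRIO_MAX) < 0)
 *		err(1, "ioprio_set");
 */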

static
int
ioprio_set_callback(struct proc *p, void *data)
{
	struct ioprio_set_info *info = data;
	int error;

	lwkt_gettoken(&p->p_token);
	if (p->p_ucred->cr_uid == info->who &&
	    PRISON_CHECK(curthread->td_ucred, p->p_ucred)) {
		error = doionice(p, info->prio);
		if (error)
			info->error = error;
		++info->found;
	}
	lwkt_reltoken(&p->p_token);
	return(0);
}

static int
doionice(struct proc *chgp, int n)
{
	struct ucred *cr = curthread->td_ucred;

	if (cr->cr_uid && cr->cr_ruid &&
	    cr->cr_uid != chgp->p_ucred->cr_uid &&
	    cr->cr_ruid != chgp->p_ucred->cr_uid)
		return (EPERM);
	if (n > IOPRIO_MAX)
		n = IOPRIO_MAX;
	if (n < IOPRIO_MIN)
		n = IOPRIO_MIN;
	if (n < chgp->p_ionice &&
	    caps_priv_check(cr, SYSCAP_NOSCHED))
	{
		return (EACCES);
	}
	chgp->p_ionice = n;

	return (0);
}

/*
 * MPALMOSTSAFE
 */
int
sys_lwp_rtprio(struct sysmsg *sysmsg, const struct lwp_rtprio_args *uap)
{
	struct ucred *cr = curthread->td_ucred;
	struct proc *p;
	struct lwp *lp;
	struct rtprio rtp;
	int error;

	error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	if (error)
		return error;
	if (uap->pid < 0)
		return EINVAL;

	if (uap->pid == 0) {
		p = curproc;
		PHOLD(p);
	} else {
		p = pfind(uap->pid);
	}
	if (p == NULL) {
		error = ESRCH;
		goto done;
	}
	lwkt_gettoken(&p->p_token);

	if (uap->tid < -1) {
		error = EINVAL;
		goto done;
	}
	if (uap->tid == -1) {
		/*
		 * sadly, tid can be 0 so we can't use 0 here
		 * like sys_rtprio()
		 */
		lp = curthread->td_lwp;
	} else {
		lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, uap->tid);
		if (lp == NULL) {
			error = ESRCH;
			goto done;
		}
	}

	/*
	 * Make sure that this lwp is not ripped out from under us if any
	 * of the following code blocks (sleeps), e.g. the copyout.
	 */
	LWPHOLD(lp);
	switch (uap->function) {
	case RTP_LOOKUP:
		error = copyout(&lp->lwp_rtprio, uap->rtp,
				sizeof(struct rtprio));
		break;
	case RTP_SET:
		if (cr->cr_uid && cr->cr_ruid &&
		    cr->cr_uid != p->p_ucred->cr_uid &&
		    cr->cr_ruid != p->p_ucred->cr_uid) {
			error = EPERM;
			break;
		}
		/* disallow setting rtprio in most cases if not superuser */
		if (caps_priv_check(cr, SYSCAP_NOSCHED)) {
			/* can't set someone else's */
			if (uap->pid) { /* XXX */
				error = EPERM;
				break;
			}
			/*
			 * Can't set realtime priority.
			 *
			 * Realtime priority has to be restricted for
			 * reasons which should be obvious.  However, for
			 * idle priority, there is a potential for system
			 * deadlock if an idleprio process gains a lock on
			 * a resource that other processes need (and the
			 * idleprio process can't run due to a CPU-bound
			 * normal process).  Fix me!  XXX
			 */
			if (RTP_PRIO_IS_REALTIME(rtp.type)) {
				error = EPERM;
				break;
			}
		}
		switch (rtp.type) {
#ifdef RTP_PRIO_FIFO
		case RTP_PRIO_FIFO:
#endif
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_NORMAL:
		case RTP_PRIO_IDLE:
			if (rtp.prio > RTP_PRIO_MAX) {
				error = EINVAL;
			} else {
				lp->lwp_rtprio = rtp;
				error = 0;
			}
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	LWPRELE(lp);

done:
	if (p) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}
	return (error);
}

/*
 * Set realtime priority
 *
 * MPALMOSTSAFE
 */
int
sys_rtprio(struct sysmsg *sysmsg, const struct rtprio_args *uap)
{
	struct ucred *cr = curthread->td_ucred;
	struct proc *p;
	struct lwp *lp;
	struct rtprio rtp;
	int error;

	error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	if (error)
		return (error);

	if (uap->pid == 0) {
		p = curproc;
		PHOLD(p);
	} else {
		p = pfind(uap->pid);
	}

	if (p == NULL) {
		error = ESRCH;
		goto done;
	}
	lwkt_gettoken(&p->p_token);

	/* XXX lwp */
	lp = FIRST_LWP_IN_PROC(p);
	switch (uap->function) {
	case RTP_LOOKUP:
		error = copyout(&lp->lwp_rtprio, uap->rtp,
				sizeof(struct rtprio));
		break;
	case RTP_SET:
		if (cr->cr_uid && cr->cr_ruid &&
		    cr->cr_uid != p->p_ucred->cr_uid &&
		    cr->cr_ruid != p->p_ucred->cr_uid) {
			error = EPERM;
			break;
		}
		/* disallow setting rtprio in most cases if not superuser */
		if (caps_priv_check(cr, SYSCAP_NOSCHED)) {
			/* can't set someone else's */
			if (uap->pid) {
				error = EPERM;
				break;
			}
			/*
			 * Can't set realtime priority.
			 *
			 * Realtime priority has to be restricted for
			 * reasons which should be obvious.  However, for
			 * idle priority, there is a potential for system
			 * deadlock if an idleprio process gains a lock on
			 * a resource that other processes need (and the
			 * idleprio process can't run due to a CPU-bound
			 * normal process).  Fix me!  XXX
			 */
			if (RTP_PRIO_IS_REALTIME(rtp.type)) {
				error = EPERM;
				break;
			}
		}
		switch (rtp.type) {
#ifdef RTP_PRIO_FIFO
		case RTP_PRIO_FIFO:
#endif
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_NORMAL:
		case RTP_PRIO_IDLE:
			if (rtp.prio > RTP_PRIO_MAX) {
				error = EINVAL;
				break;
			}
			lp->lwp_rtprio = rtp;
			error = 0;
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
done:
	if (p) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}

	return (error);
}
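
/*
 * Illustrative userland usage (not part of this file): demote the calling
 * process to idle priority via rtprio(2).  sys_lwp_rtprio() above is the
 * same operation but addresses a single lwp by (pid, tid).
 *
 *	#include <sys/rtprio.h>
 *	#include <err.h>
 *
 *	struct rtprio rtp;
 *
 *	rtp.type = RTP_PRIO_IDLE;
 *	rtp.prio = RTP_PRIO_MAX;		// lowest priority in class
 *	if (rtprio(RTP_SET, 0, &rtp) < 0)	// pid 0 == calling process
 *		err(1, "rtprio");
 */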

/*
 * Transform the running time and tick information in lwp lp's thread into
 * user, system, and interrupt time usage.
 *
 * Since we are limited to statclock tick granularity this is a statistical
 * calculation which will be correct over the long haul, but should not be
 * expected to measure fine grained deltas.
 *
 * It is possible to catch a lwp in the midst of being created, so
 * check whether lwp_thread is NULL or not.
 */
void
calcru(struct lwp *lp, struct timeval *up, struct timeval *sp)
{
	struct thread *td;

	/*
	 * Calculate at the statclock level.  YYY if the thread is owned by
	 * another cpu we need to forward the request to the other cpu, or
	 * have a token to interlock the information in order to avoid racing
	 * thread destruction.
	 */
	if ((td = lp->lwp_thread) != NULL) {
		crit_enter();
		up->tv_sec = td->td_uticks / 1000000;
		up->tv_usec = td->td_uticks % 1000000;
		sp->tv_sec = td->td_sticks / 1000000;
		sp->tv_usec = td->td_sticks % 1000000;
		crit_exit();
	}
}
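
/*
 * Note: the divide/modulo by 1000000 above means td_uticks/td_sticks are
 * accumulated in microseconds, so the split is effectively a straight
 * microseconds-to-timeval conversion.  E.g. a td_uticks of 2500000 yields
 * up->tv_sec = 2 and up->tv_usec = 500000.
 */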

/*
 * Aggregate resource statistics of all lwps of a process.
 *
 * proc.p_ru keeps track of all statistics directly related to a proc.  This
 * consists of RSS usage and nswap information and aggregate numbers for all
 * former lwps of this proc.
 *
 * proc.p_cru is the sum of all stats of reaped children.
 *
 * lwp.lwp_ru contains the stats directly related to one specific lwp, meaning
 * packet, scheduler switch or page fault counts, etc.  This information gets
 * added to lwp.lwp_proc.p_ru when the lwp exits.
 */
void
calcru_proc(struct proc *p, struct rusage *ru)
{
	struct timeval upt, spt;
	long *rip1, *rip2;
	struct lwp *lp;

	*ru = p->p_ru;

	FOREACH_LWP_IN_PROC(lp, p) {
		calcru(lp, &upt, &spt);
		timevaladd(&ru->ru_utime, &upt);
		timevaladd(&ru->ru_stime, &spt);
		for (rip1 = &ru->ru_first, rip2 = &lp->lwp_ru.ru_first;
		     rip1 <= &ru->ru_last;
		     rip1++, rip2++)
			*rip1 += *rip2;
	}
}


/*
 * MPALMOSTSAFE
 */
int
sys_getrusage(struct sysmsg *sysmsg, const struct getrusage_args *uap)
{
	struct proc *p = curproc;
	struct rusage ru;
	struct rusage *rup;
	int error;

	lwkt_gettoken(&p->p_token);

	switch (uap->who) {
	case RUSAGE_SELF:
		rup = &ru;
		calcru_proc(p, rup);
		error = 0;
		break;
	case RUSAGE_CHILDREN:
		rup = &p->p_cru;
		error = 0;
		break;
	default:
		error = EINVAL;
		break;
	}
	lwkt_reltoken(&p->p_token);

	if (error == 0)
		error = copyout(rup, uap->rusage, sizeof(struct rusage));
	return (error);
}
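
/*
 * Illustrative userland usage (not part of this file): getrusage(2) as
 * serviced above.
 *
 *	#include <sys/resource.h>
 *	#include <stdio.h>
 *	#include <err.h>
 *
 *	struct rusage ru;
 *
 *	if (getrusage(RUSAGE_SELF, &ru) < 0)
 *		err(1, "getrusage");
 *	printf("user %ld.%06lds, maxrss %ldKB\n",
 *	    ru.ru_utime.tv_sec, ru.ru_utime.tv_usec, ru.ru_maxrss);
 */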

void
ruadd(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	timevaladd(&ru->ru_utime, &ru2->ru_utime);
	timevaladd(&ru->ru_stime, &ru2->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first; ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

/*
 * Find the uidinfo structure for a uid.  This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */
void
uihashinit(void)
{
	spin_init(&uihash_lock, "uihashinit");
	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
}

/*
 * NOTE: Must be called with uihash_lock held
 */
static struct uidinfo *
uilookup(uid_t uid)
{
	struct	uihashhead *uipp;
	struct	uidinfo *uip;

	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash) {
		if (uip->ui_uid == uid)
			break;
	}
	return (uip);
}

/*
 * Helper function to create a uidinfo for a uid that could not be found.
 * This function will properly deal with races.
 *
 * WARNING! Should only be used by this source file and by the proc0
 *	    creation code.
 */
struct uidinfo *
uicreate(uid_t uid)
{
	struct	uidinfo *uip, *tmp;

	/*
	 * Allocate space and check for a race
	 */
	uip = kmalloc(sizeof(*uip), M_UIDINFO, M_WAITOK | M_ZERO);

	/*
	 * Initialize structure and enter it into the hash table
	 */
	spin_init(&uip->ui_lock, "uicreate");
	uip->ui_uid = uid;
	uip->ui_ref = 1;	/* we're returning a ref */
	varsymset_init(&uip->ui_varsymset, NULL);
	uip->ui_pcpu = kmalloc(sizeof(*uip->ui_pcpu) * ncpus,
			       M_UIDINFO, M_WAITOK | M_ZERO);

	/*
	 * Somebody may have already created the uidinfo for this
	 * uid. If so, return that instead.
	 */
	spin_lock(&uihash_lock);
	tmp = uilookup(uid);
	if (tmp != NULL) {
		uihold(tmp);
		spin_unlock(&uihash_lock);

		spin_uninit(&uip->ui_lock);
		varsymset_clean(&uip->ui_varsymset);
		kfree(uip->ui_pcpu, M_UIDINFO);
		kfree(uip, M_UIDINFO);
		uip = tmp;
	} else {
		LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
		spin_unlock(&uihash_lock);
	}
	return (uip);
}

/*
 * Find the uidinfo for a uid, creating one if necessary
 */
struct uidinfo *
uifind(uid_t uid)
{
	struct uidinfo *uip;
	thread_t td = curthread;

	if (td->td_ucred) {
		uip = td->td_ucred->cr_uidinfo;
		if (uip->ui_uid == uid) {
			uihold(uip);
			return uip;
		}
		uip = td->td_ucred->cr_ruidinfo;
		if (uip->ui_uid == uid) {
			uihold(uip);
			return uip;
		}
	}

	spin_lock_shared(&uihash_lock);
	uip = uilookup(uid);
	if (uip == NULL) {
		spin_unlock_shared(&uihash_lock);
		uip = uicreate(uid);
	} else {
		uihold(uip);
		spin_unlock_shared(&uihash_lock);
	}
	return (uip);
}
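
/*
 * Kernel-internal usage sketch (not part of this file): uifind() always
 * returns a referenced uidinfo, so every lookup must be paired with a
 * uidrop() once the caller is done with it.
 *
 *	struct uidinfo *uip = uifind(uid);
 *	...				// use/adjust the per-uid counters
 *	uidrop(uip);
 */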

/*
 * Helper function to remove a uidinfo whose reference count may have
 * transitioned to 0.  The reference count is likely 0 at the time of
 * the call.
 */
static __inline void
uifree(uid_t uid)
{
	struct uidinfo *uip;

	/*
	 * If we are still the only holder after acquiring the uihash_lock
	 * we can safely unlink the uip and destroy it.  Otherwise we lost
	 * a race and must fail.
	 */
	spin_lock(&uihash_lock);
	uip = uilookup(uid);
	if (uip && uip->ui_ref == 0) {
		LIST_REMOVE(uip, ui_hash);
		spin_unlock(&uihash_lock);

		/*
		 * The uip is now orphaned and we can destroy it at our
		 * leisure.
		 */
		if (uip->ui_sbsize != 0)
			kprintf("freeing uidinfo: uid = %d, sbsize = %jd\n",
			    uip->ui_uid, (intmax_t)uip->ui_sbsize);
		if (uip->ui_proccnt != 0)
			kprintf("freeing uidinfo: uid = %d, proccnt = %ld\n",
			    uip->ui_uid, uip->ui_proccnt);

		varsymset_clean(&uip->ui_varsymset);
		lockuninit(&uip->ui_varsymset.vx_lock);
		spin_uninit(&uip->ui_lock);
		kfree(uip->ui_pcpu, M_UIDINFO);
		kfree(uip, M_UIDINFO);
	} else {
		spin_unlock(&uihash_lock);
	}
}

/*
 * Bump the ref count
 */
void
uihold(struct uidinfo *uip)
{
	KKASSERT(uip->ui_ref >= 0);
	atomic_add_int(&uip->ui_ref, 1);
}

/*
 * Drop the ref count.  The last-drop code still needs to remove the
 * uidinfo from the hash table which it does by re-looking-it-up.
 *
 * NOTE: The uip can be ripped out from under us after the fetchadd.
 */
void
uidrop(struct uidinfo *uip)
{
	uid_t uid;

	KKASSERT(uip->ui_ref > 0);
	uid = uip->ui_uid;
	cpu_ccfence();
	if (atomic_fetchadd_int(&uip->ui_ref, -1) == 1) {
		uifree(uid);
	}
}

void
uireplace(struct uidinfo **puip, struct uidinfo *nuip)
{
	uidrop(*puip);
	*puip = nuip;
}

/*
 * Change the count associated with number of processes
 * a given user is using.
 *
 * NOTE: When 'max' is 0, don't enforce a limit.
 *
 * NOTE: Due to concurrency, the count can sometimes exceed the max
 *	 by a small amount.
 */
int
chgproccnt(struct uidinfo *uip, int diff, int max)
{
	int ret;

	/* don't allow them to exceed max, but allow subtraction */
	if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
		ret = 0;
	} else {
		atomic_add_long(&uip->ui_proccnt, diff);
		if (uip->ui_proccnt < 0)
			kprintf("negative proccnt for uid = %d\n", uip->ui_uid);
		ret = 1;
	}
	return ret;
}
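
/*
 * Kernel-internal usage sketch (not part of this file): the fork path
 * typically charges the new process against the user's process limit,
 * roughly as follows (names illustrative):
 *
 *	if (chgproccnt(uip, 1, maxproc_per_uid) == 0)	// over the limit
 *		return (EAGAIN);
 *	...
 *	chgproccnt(uip, -1, 0);				// undo on failure/exit
 */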

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_long *hiwat, u_long to, rlim_t max)
{
	rlim_t new;
	rlim_t sbsize;

	sbsize = atomic_fetchadd_long(&uip->ui_sbsize, to - *hiwat);
	new = sbsize + to - *hiwat;
	KKASSERT(new >= 0);

	/*
	 * If we are trying to increase the socket buffer size, scale down
	 * the hi water mark when we exceed the user's allowed socket
	 * buffer space.
	 *
	 * We can't scale down too much or we will blow up atomic packet
	 * operations.
	 */
	if (to > *hiwat && to > MCLBYTES && new > max) {
		to = to * max / new;
		if (to < MCLBYTES)
			to = MCLBYTES;
	}
	*hiwat = to;
	return (1);
}
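
/*
 * Worked example of the scale-down above: with max = 1MB, a request that
 * would bring the user's aggregate to new = 2MB and a requested hiwat of
 * to = 512KB is reduced to 512KB * 1MB / 2MB = 256KB, and is never
 * reduced below MCLBYTES.
 */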