/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_resource.c,v 1.55.2.5 2001/11/03 01:41:08 ps Exp $
 * $DragonFly: src/sys/kern/kern_plimit.c,v 1.3 2008/05/08 01:26:00 dillon Exp $
 */

#include <sys/resource.h>
#include <sys/spinlock.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/file.h>
#include <sys/lockf.h>
#include <sys/kern_syscall.h>

#include <vm/vm_param.h>
#include <vm/vm.h>
#include <vm/vm_map.h>

#include <machine/pmap.h>

#include <sys/spinlock2.h>

static void plimit_copy(struct plimit *olimit, struct plimit *nlimit);

/*
 * Initialize proc0's plimit structure.  All later plimit structures
 * are inherited through fork.
 */
void
plimit_init0(struct plimit *limit)
{
	int i;
	rlim_t lim;

	for (i = 0; i < RLIM_NLIMITS; ++i) {
		limit->pl_rlimit[i].rlim_cur = RLIM_INFINITY;
		limit->pl_rlimit[i].rlim_max = RLIM_INFINITY;
	}
	limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur = maxfiles;
	limit->pl_rlimit[RLIMIT_NOFILE].rlim_max = maxfiles;
	limit->pl_rlimit[RLIMIT_NPROC].rlim_cur = maxproc;
	limit->pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc;
	lim = ptoa((rlim_t)vmstats.v_free_count);
	limit->pl_rlimit[RLIMIT_RSS].rlim_max = lim;
	limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_max = lim;
	limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = lim / 3;
	limit->p_cpulimit = RLIM_INFINITY;
	limit->p_refcnt = 1;
	spin_init(&limit->p_spin);
}
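
/*
 * Example (illustrative only, not code from this file): once a process is
 * running, kernel code normally reads the soft limits through
 * p->p_rlimit[] (shorthand for p_limit->pl_rlimit[]; see kern_getrlimit()
 * below).  A hypothetical open-file check might look like:
 *
 *	if (new_fd_count >= p->p_rlimit[RLIMIT_NOFILE].rlim_cur)
 *		return (EMFILE);
 *
 * new_fd_count is just a placeholder name for this sketch.
 */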

/*
 * Return a plimit for use by a new forked process given the one
 * contained in the parent process.
 *
 * MPSAFE
 */
struct plimit *
plimit_fork(struct proc *p1)
{
	struct plimit *olimit = p1->p_limit;
	struct plimit *nlimit = NULL;
	struct plimit *rlimit;

	/*
	 * If we are exclusive (but not threaded-exclusive) and have only
	 * one reference, we can convert the structure back to
	 * copy-on-write.
	 *
	 * If we were threaded but are no longer threaded we can do the
	 * same thing.
	 */
	if (olimit->p_exclusive == 1) {
		KKASSERT(olimit->p_refcnt == 1);
		olimit->p_exclusive = 0;
	} else if (olimit->p_exclusive == 2 && p1->p_nthreads == 1) {
		KKASSERT(olimit->p_refcnt == 1);
		olimit->p_exclusive = 0;
	}

	/*
	 * Take a short-cut that needs only minimal spin-lock use.  If we
	 * aren't exclusive we cannot be threaded and can just bump the ref
	 * count.  If on top of that we hold the only ref, there can be no
	 * other accessors at all and the increment needs no lock.
	 */
	if (olimit->p_exclusive == 0) {
		if (olimit->p_refcnt == 1) {
			++olimit->p_refcnt;
		} else {
			spin_lock(&olimit->p_spin);
			++olimit->p_refcnt;
			spin_unlock(&olimit->p_spin);
		}
		return(olimit);
	}

	/*
	 * Full-blown code path: take the spin lock and either share the
	 * structure or make a private copy.
	 */
	nlimit = NULL;
	spin_lock(&olimit->p_spin);

	for (;;) {
		if (olimit->p_exclusive == 0) {
			++olimit->p_refcnt;
			rlimit = olimit;
			break;
		}
		if (nlimit) {
			plimit_copy(olimit, nlimit);
			rlimit = nlimit;
			nlimit = NULL;
			break;
		}
		spin_unlock(&olimit->p_spin);
		nlimit = kmalloc(sizeof(*nlimit), M_SUBPROC, M_WAITOK);
		spin_lock(&olimit->p_spin);
	}
	spin_unlock(&olimit->p_spin);
	if (nlimit)
		kfree(nlimit, M_SUBPROC);
	return(rlimit);
}
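
/*
 * Summary of the p_exclusive states as used in this file:
 *
 *	0 - shared, copy-on-write; may be referenced by more than one
 *	    process.
 *	1 - exclusive to a single-threaded process; may be modified in
 *	    place (set by plimit_modify()).
 *	2 - exclusive to a threaded process; p->p_limit must remain
 *	    stable while multiple LWPs exist (set by plimit_lwp_fork()).
 *
 * Illustrative fork-time usage (a sketch, not a quote of the actual
 * fork code):
 *
 *	p2->p_limit = plimit_fork(p1);
 */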

/*
 * This routine is called when a new LWP is created for a process.  We
 * must force exclusivity (=2) so p->p_limit remains stable.
 *
 * LWPs share the same process structure so this does not bump refcnt.
 */
void
plimit_lwp_fork(struct proc *p)
{
	struct plimit *olimit;

	for (;;) {
		olimit = p->p_limit;
		if (olimit->p_exclusive == 2) {
			KKASSERT(olimit->p_refcnt == 1);
			break;
		}
		if (olimit->p_refcnt == 1) {
			olimit->p_exclusive = 2;
			break;
		}
		plimit_modify(p, -1, NULL);
	}
}
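
/*
 * Illustrative usage (a sketch, not the actual LWP-creation code): the
 * creator makes the limit exclusive before the new LWP becomes visible,
 * so a later plimit_modify() can never swap p->p_limit out from under a
 * sibling thread:
 *
 *	plimit_lwp_fork(p);
 *	... then allocate and attach the new struct lwp ...
 */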

/*
 * This routine is called to fix up a process's p_limit structure prior
 * to it being modified.  If index >= 0 the specified modification is also
 * made.
 *
 * This routine must make the limit structure exclusive.  A later fork
 * will convert it back to copy-on-write if possible.
 *
 * We can count on p->p_limit being stable because if we had created any
 * threads it would already have been made exclusive (=2).
 *
 * MPSAFE
 */
void
plimit_modify(struct proc *p, int index, struct rlimit *rlim)
{
	struct plimit *olimit;
	struct plimit *nlimit;
	struct plimit *rlimit;

	/*
	 * Shortcut.  If we are not threaded we may be able to trivially
	 * set the structure to exclusive access without needing to acquire
	 * any spinlocks.  The p_limit structure will be stable.
	 */
	olimit = p->p_limit;
	if (p->p_nthreads == 1) {
		if (olimit->p_exclusive == 0 && olimit->p_refcnt == 1)
			olimit->p_exclusive = 1;
		if (olimit->p_exclusive) {
			if (index >= 0)
				p->p_limit->pl_rlimit[index] = *rlim;
			return;
		}
	}

	/*
	 * Full-blown code path.  Make a copy if we aren't exclusive.  If
	 * we have only one ref we can safely convert the structure to
	 * exclusive without copying.
	 */
	nlimit = NULL;
	spin_lock(&olimit->p_spin);

	for (;;) {
		if (olimit->p_refcnt == 1) {
			if (olimit->p_exclusive == 0)
				olimit->p_exclusive = 1;
			rlimit = olimit;
			break;
		}
		KKASSERT(olimit->p_exclusive == 0);
		if (nlimit) {
			plimit_copy(olimit, nlimit);
			nlimit->p_exclusive = 1;
			p->p_limit = nlimit;
			rlimit = nlimit;
			nlimit = NULL;
			break;
		}
		spin_unlock(&olimit->p_spin);
		nlimit = kmalloc(sizeof(*nlimit), M_SUBPROC, M_WAITOK);
		spin_lock(&olimit->p_spin);
	}
	if (index >= 0)
		rlimit->pl_rlimit[index] = *rlim;
	spin_unlock(&olimit->p_spin);
	if (nlimit)
		kfree(nlimit, M_SUBPROC);
}
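
/*
 * Illustrative usage (a sketch, not code from this file): to change a
 * single limit directly, a caller fills in a struct rlimit and passes
 * its index, letting plimit_modify() handle the copy-on-write details:
 *
 *	struct rlimit rl;
 *
 *	rl.rlim_cur = 256;
 *	rl.rlim_max = 1024;
 *	plimit_modify(p, RLIMIT_NOFILE, &rl);
 *
 * kern_setrlimit() below instead calls plimit_modify(p, -1, NULL) to
 * obtain an exclusive structure first and then validates the new values
 * itself.
 */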

/*
 * Destroy a process's plimit structure.
 *
 * MPSAFE
 */
void
plimit_free(struct proc *p)
{
	struct plimit *limit;

	if ((limit = p->p_limit) != NULL) {
		p->p_limit = NULL;

		if (limit->p_refcnt == 1) {
			limit->p_refcnt = -999;	/* sanity: poison the ref count */
			kfree(limit, M_SUBPROC);
		} else {
			spin_lock(&limit->p_spin);
			if (--limit->p_refcnt == 0) {
				spin_unlock(&limit->p_spin);
				kfree(limit, M_SUBPROC);
			} else {
				spin_unlock(&limit->p_spin);
			}
		}
	}
}

/*
 * Modify a resource limit (from system call)
 *
 * MPSAFE
 */
int
kern_setrlimit(u_int which, struct rlimit *limp)
{
        struct proc *p = curproc;
	struct plimit *limit;
        struct rlimit *alimp;
        int error;

        if (which >= RLIM_NLIMITS)
                return (EINVAL);

	/*
	 * We will be modifying a resource, make a copy if necessary.
	 */
	plimit_modify(p, -1, NULL);
	limit = p->p_limit;
        alimp = &limit->pl_rlimit[which];

        /*
         * Preserve historical bugs by treating negative limits as unsigned.
         */
        if (limp->rlim_cur < 0)
                limp->rlim_cur = RLIM_INFINITY;
        if (limp->rlim_max < 0)
                limp->rlim_max = RLIM_INFINITY;

	spin_lock(&limit->p_spin);
        if (limp->rlim_cur > alimp->rlim_max ||
            limp->rlim_max > alimp->rlim_max) {
		spin_unlock(&limit->p_spin);
                error = priv_check_cred(p->p_ucred, PRIV_PROC_SETRLIMIT, 0);
                if (error)
                        return (error);
	} else {
		spin_unlock(&limit->p_spin);
	}
        if (limp->rlim_cur > limp->rlim_max)
                limp->rlim_cur = limp->rlim_max;

        switch (which) {
        case RLIMIT_CPU:
		spin_lock(&limit->p_spin);
                if (limp->rlim_cur > RLIM_INFINITY / (rlim_t)1000000)
                        limit->p_cpulimit = RLIM_INFINITY;
                else
                        limit->p_cpulimit = (rlim_t)1000000 * limp->rlim_cur;
		spin_unlock(&limit->p_spin);
                break;
        case RLIMIT_DATA:
                if (limp->rlim_cur > maxdsiz)
                        limp->rlim_cur = maxdsiz;
                if (limp->rlim_max > maxdsiz)
                        limp->rlim_max = maxdsiz;
                break;

        case RLIMIT_STACK:
                if (limp->rlim_cur > maxssiz)
                        limp->rlim_cur = maxssiz;
                if (limp->rlim_max > maxssiz)
                        limp->rlim_max = maxssiz;
                /*
                 * The stack is allocated to the maximum at exec time with
                 * only "rlim_cur" bytes made accessible.  If the stack limit
                 * is going up, make more of the stack accessible; if it is
                 * going down, make the excess inaccessible.
                 */
		spin_lock(&limit->p_spin);
                if (limp->rlim_cur != alimp->rlim_cur) {
                        vm_offset_t addr;
                        vm_size_t size;
                        vm_prot_t prot;

                        if (limp->rlim_cur > alimp->rlim_cur) {
                                prot = VM_PROT_ALL;
                                size = limp->rlim_cur - alimp->rlim_cur;
                                addr = USRSTACK - limp->rlim_cur;
                        } else {
                                prot = VM_PROT_NONE;
                                size = alimp->rlim_cur - limp->rlim_cur;
                                addr = USRSTACK - alimp->rlim_cur;
                        }
			spin_unlock(&limit->p_spin);
                        addr = trunc_page(addr);
                        size = round_page(size);
                        vm_map_protect(&p->p_vmspace->vm_map,
				       addr, addr+size, prot, FALSE);
                } else {
			spin_unlock(&limit->p_spin);
		}
                break;

        case RLIMIT_NOFILE:
                if (limp->rlim_cur > maxfilesperproc)
                        limp->rlim_cur = maxfilesperproc;
                if (limp->rlim_max > maxfilesperproc)
                        limp->rlim_max = maxfilesperproc;
                break;

        case RLIMIT_NPROC:
                if (limp->rlim_cur > maxprocperuid)
                        limp->rlim_cur = maxprocperuid;
                if (limp->rlim_max > maxprocperuid)
                        limp->rlim_max = maxprocperuid;
                if (limp->rlim_cur < 1)
                        limp->rlim_cur = 1;
                if (limp->rlim_max < 1)
                        limp->rlim_max = 1;
                break;
        case RLIMIT_POSIXLOCKS:
                if (limp->rlim_cur > maxposixlocksperuid)
                        limp->rlim_cur = maxposixlocksperuid;
                if (limp->rlim_max > maxposixlocksperuid)
                        limp->rlim_max = maxposixlocksperuid;
                break;
        }
	spin_lock(&limit->p_spin);
        *alimp = *limp;
	spin_unlock(&limit->p_spin);
        return (0);
}
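
/*
 * Illustrative caller (a sketch of the setrlimit(2) wrapper pattern; the
 * real wrapper lives elsewhere and may differ in detail): copy the user's
 * struct rlimit into kernel space, then let kern_setrlimit() do the
 * validation and clamping:
 *
 *	struct rlimit alim;
 *	int error;
 *
 *	error = copyin(user_rlp, &alim, sizeof(alim));
 *	if (error == 0)
 *		error = kern_setrlimit(which, &alim);
 *	return (error);
 *
 * user_rlp and which stand in for the syscall arguments.
 */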

/*
 * The rlimit indexed by which is returned in the second argument.
 *
 * MPSAFE
 */
int
kern_getrlimit(u_int which, struct rlimit *limp)
{
	struct proc *p = curproc;
	struct plimit *limit;

        if (which >= RLIM_NLIMITS)
                return (EINVAL);

	limit = p->p_limit;
	spin_lock(&limit->p_spin);
        *limp = p->p_rlimit[which];
	spin_unlock(&limit->p_spin);
        return (0);
}

/*
 * Determine if the cpu limit has been reached and return an operation
 * code telling the caller what action to take.
 *
 * MPSAFE
 */
int
plimit_testcpulimit(struct plimit *limit, u_int64_t ttime)
{
	struct rlimit *rlim;
	int mode;

	/*
	 * Initial tests without the spinlock.  This is the fast path.
	 * Any 32/64 bit glitches will fall through and retest with
	 * the spinlock.
	 */
	if (limit->p_cpulimit == RLIM_INFINITY)
		return(PLIMIT_TESTCPU_OK);
	if (ttime <= limit->p_cpulimit)
		return(PLIMIT_TESTCPU_OK);

	spin_lock(&limit->p_spin);
	if (ttime > limit->p_cpulimit) {
		rlim = &limit->pl_rlimit[RLIMIT_CPU];
		if (ttime / (rlim_t)1000000 >= rlim->rlim_max + 5)
			mode = PLIMIT_TESTCPU_KILL;
		else
			mode = PLIMIT_TESTCPU_XCPU;
	} else {
		mode = PLIMIT_TESTCPU_OK;
	}
	spin_unlock(&limit->p_spin);
	return(mode);
}

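/*
 * Worked example (numbers are illustrative, arithmetic follows the code
 * above and in kern_setrlimit()):
 *
 *	setrlimit(RLIMIT_CPU) with rlim_cur = 10 and rlim_max = 12 seconds
 *	stores p_cpulimit = 10 * 1000000 = 10000000 microseconds.
 *
 *	ttime = 11000000 (11s) -> PLIMIT_TESTCPU_XCPU (soft limit exceeded)
 *	ttime = 17000000 (17s) -> PLIMIT_TESTCPU_KILL (past rlim_max + 5s)
 *
 * so the caller can presumably deliver SIGXCPU for a while before finally
 * killing the process.
 */
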
/*
 * Helper routine to copy olimit to nlimit and initialize nlimit for
 * use.  nlimit's reference count will be set to 1 and its exclusive bit
 * will be cleared.
 *
 * MPSAFE
 */
static
void
plimit_copy(struct plimit *olimit, struct plimit *nlimit)
{
	*nlimit = *olimit;

	spin_init(&nlimit->p_spin);
	nlimit->p_refcnt = 1;
	nlimit->p_exclusive = 0;
}