xref: /dragonfly/sys/kern/kern_plimit.c (revision 4bab7bf3)
1 /*
2  * Copyright (c) 2006,2017 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 /*
35  * Copyright (c) 1982, 1986, 1991, 1993
36  *	The Regents of the University of California.  All rights reserved.
37  * (c) UNIX System Laboratories, Inc.
38  * All or some portions of this file are derived from material licensed
39  * to the University of California by American Telephone and Telegraph
40  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
41  * the permission of UNIX System Laboratories, Inc.
42  *
43  * Redistribution and use in source and binary forms, with or without
44  * modification, are permitted provided that the following conditions
45  * are met:
46  * 1. Redistributions of source code must retain the above copyright
47  *    notice, this list of conditions and the following disclaimer.
48  * 2. Redistributions in binary form must reproduce the above copyright
49  *    notice, this list of conditions and the following disclaimer in the
50  *    documentation and/or other materials provided with the distribution.
51  * 3. Neither the name of the University nor the names of its contributors
52  *    may be used to endorse or promote products derived from this software
53  *    without specific prior written permission.
54  *
55  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
56  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
57  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
58  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
59  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
60  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
61  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
62  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
63  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
64  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65  * SUCH DAMAGE.
66  *
67  *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
68  */
69 #include <sys/resource.h>
70 #include <sys/spinlock.h>
71 #include <sys/proc.h>
72 #include <sys/priv.h>
73 #include <sys/file.h>
74 #include <sys/lockf.h>
75 #include <sys/kern_syscall.h>
76 
77 #include <vm/vm_param.h>
78 #include <vm/vm.h>
79 #include <vm/vm_map.h>
80 
81 #include <machine/pmap.h>
82 
83 #include <sys/spinlock2.h>
84 
/* kmalloc tag under which all struct plimit allocations are accounted */
static MALLOC_DEFINE(M_PLIMIT, "plimit", "resource limits");

/* Duplicate olimit into nlimit and reinitialize nlimit's refcnt/spinlock */
static void plimit_copy(struct plimit *olimit, struct plimit *nlimit);
88 
89 /*
90  * Initialize proc0's plimit structure.  All later plimit structures
91  * are inherited through fork.
92  */
93 void
94 plimit_init0(struct plimit *limit)
95 {
96 	int i;
97 	rlim_t lim;
98 
99 	for (i = 0; i < RLIM_NLIMITS; ++i) {
100 		limit->pl_rlimit[i].rlim_cur = RLIM_INFINITY;
101 		limit->pl_rlimit[i].rlim_max = RLIM_INFINITY;
102 	}
103 	limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur = maxfiles;
104 	limit->pl_rlimit[RLIMIT_NOFILE].rlim_max = maxfiles;
105 	limit->pl_rlimit[RLIMIT_NPROC].rlim_cur = maxproc;
106 	limit->pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc;
107 	lim = ptoa((rlim_t)vmstats.v_free_count);
108 	limit->pl_rlimit[RLIMIT_RSS].rlim_max = lim;
109 	limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_max = lim;
110 	limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = lim / 3;
111 	limit->p_cpulimit = RLIM_INFINITY;
112 	limit->p_refcnt = 1;
113 	spin_init(&limit->p_spin, "plimitinit");
114 }
115 
116 /*
117  * Return a plimit for use by a new forked process given the one
118  * contained in the parent process.
119  */
120 struct plimit *
121 plimit_fork(struct proc *p1)
122 {
123 	struct plimit *olimit = p1->p_limit;
124 	struct plimit *nlimit;
125 	uint32_t count;
126 
127 	/*
128 	 * Try to share the parent's plimit structure.  If we cannot, make
129 	 * a copy.
130 	 *
131 	 * NOTE: (count) value is field prior to increment.
132 	 */
133 	count = atomic_fetchadd_int(&olimit->p_refcnt, 1);
134 	cpu_ccfence();
135 	if (count & PLIMITF_EXCLUSIVE) {
136 		if ((count & PLIMITF_MASK) == 1 && p1->p_nthreads == 1) {
137 			atomic_clear_int(&olimit->p_refcnt, PLIMITF_EXCLUSIVE);
138 		} else {
139 			nlimit = kmalloc(sizeof(*nlimit), M_PLIMIT, M_WAITOK);
140 			plimit_copy(olimit, nlimit);
141 			olimit = nlimit;
142 		}
143 	}
144 	return olimit;
145 }
146 
147 /*
148  * This routine is called when a new LWP is created for a process.  We
149  * must force exclusivity to ensure that p->p_limit remains stable.
150  *
151  * LWPs share the same process structure so this does not bump refcnt.
152  */
153 void
154 plimit_lwp_fork(struct proc *p)
155 {
156 	struct plimit *olimit = p->p_limit;
157 	struct plimit *nlimit;
158 	uint32_t count;
159 
160 	count = olimit->p_refcnt;
161 	cpu_ccfence();
162 	if ((count & PLIMITF_EXCLUSIVE) == 0) {
163 		if (count != 1) {
164 			nlimit = kmalloc(sizeof(*nlimit), M_PLIMIT, M_WAITOK);
165 			plimit_copy(olimit, nlimit);
166 			p->p_limit = nlimit;
167 			plimit_free(olimit);
168 			olimit = nlimit;
169 		}
170 		atomic_set_int(&olimit->p_refcnt, PLIMITF_EXCLUSIVE);
171 	}
172 }
173 
174 /*
175  * This routine is called to fixup a process's p_limit structure prior
176  * to it being modified.  If index >= 0 the specified modification is also
177  * made.
178  *
179  * This routine must make the limit structure exclusive.  If we are threaded,
180  * the structure will already be exclusive.  A later fork will convert it
181  * back to copy-on-write if possible.
182  *
183  * We can count on p->p_limit being stable since if we had created any
184  * threads it will have already been made exclusive.
185  */
186 void
187 plimit_modify(struct proc *p, int index, struct rlimit *rlim)
188 {
189 	struct plimit *olimit;
190 	struct plimit *nlimit;
191 	uint32_t count;
192 
193 	/*
194 	 * Make exclusive
195 	 */
196 	olimit = p->p_limit;
197 	count = olimit->p_refcnt;
198 	cpu_ccfence();
199 	if ((count & PLIMITF_EXCLUSIVE) == 0) {
200 		if (count != 1) {
201 			nlimit = kmalloc(sizeof(*nlimit), M_PLIMIT, M_WAITOK);
202 			plimit_copy(olimit, nlimit);
203 			p->p_limit = nlimit;
204 			plimit_free(olimit);
205 			olimit = nlimit;
206 		}
207 		atomic_set_int(&olimit->p_refcnt, PLIMITF_EXCLUSIVE);
208 	}
209 
210 	/*
211 	 * Make modification
212 	 */
213 	if (index >= 0) {
214 		if (p->p_nthreads == 1) {
215 			p->p_limit->pl_rlimit[index] = *rlim;
216 		} else {
217 			spin_lock(&olimit->p_spin);
218 			p->p_limit->pl_rlimit[index].rlim_cur = rlim->rlim_cur;
219 			p->p_limit->pl_rlimit[index].rlim_max = rlim->rlim_max;
220 			spin_unlock(&olimit->p_spin);
221 		}
222 	}
223 }
224 
225 /*
226  * Destroy a process's plimit structure.
227  */
228 void
229 plimit_free(struct plimit *limit)
230 {
231 	uint32_t count;
232 
233 	count = atomic_fetchadd_int(&limit->p_refcnt, -1);
234 
235 	if ((count & ~PLIMITF_EXCLUSIVE) == 1) {
236 		limit->p_refcnt = -999;
237 		kfree(limit, M_PLIMIT);
238 	}
239 }
240 
/*
 * Modify a resource limit (from system call).  Returns 0 on success or
 * EINVAL / a priv_check_cred() error.  The new limit is clamped against
 * the per-resource system maximums before being installed.
 */
int
kern_setrlimit(u_int which, struct rlimit *limp)
{
	struct proc *p = curproc;
	struct plimit *limit;
	struct rlimit *alimp;
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	/*
	 * We will be modifying a resource, make a copy if necessary.
	 */
	plimit_modify(p, -1, NULL);
	limit = p->p_limit;
	alimp = &limit->pl_rlimit[which];

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	/*
	 * Raising either value above the current hard limit requires
	 * privilege.  The spinlock covers only the comparison; the limit
	 * itself is re-copied under the lock at the bottom.
	 */
	spin_lock(&limit->p_spin);
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max) {
		spin_unlock(&limit->p_spin);
		error = priv_check_cred(p->p_ucred, PRIV_PROC_SETRLIMIT, 0);
		if (error)
			return (error);
	} else {
		spin_unlock(&limit->p_spin);
	}
	/* The soft limit may never exceed the hard limit */
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;

	switch (which) {
	case RLIMIT_CPU:
		/*
		 * p_cpulimit is kept in microseconds; clamp so the
		 * seconds-to-microseconds conversion cannot overflow.
		 */
		spin_lock(&limit->p_spin);
		if (limp->rlim_cur > RLIM_INFINITY / (rlim_t)1000000)
			limit->p_cpulimit = RLIM_INFINITY;
		else
			limit->p_cpulimit = (rlim_t)1000000 * limp->rlim_cur;
		spin_unlock(&limit->p_spin);
		break;
	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If stack limit is going
		 * up make more accessible, if going down make inaccessible.
		 */
		spin_lock(&limit->p_spin);
		if (limp->rlim_cur != alimp->rlim_cur) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > alimp->rlim_cur) {
				prot = VM_PROT_ALL;
				size = limp->rlim_cur - alimp->rlim_cur;
				addr = USRSTACK - limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = alimp->rlim_cur - limp->rlim_cur;
				addr = USRSTACK - alimp->rlim_cur;
			}
			/* drop the spinlock before the blocking VM call */
			spin_unlock(&limit->p_spin);
			addr = trunc_page(addr);
			size = round_page(size);
			vm_map_protect(&p->p_vmspace->vm_map,
				       addr, addr+size, prot, FALSE);
		} else {
			spin_unlock(&limit->p_spin);
		}
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		/* a process limit of 0 would lock the uid out entirely */
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	case RLIMIT_POSIXLOCKS:
		if (limp->rlim_cur > maxposixlocksperuid)
			limp->rlim_cur = maxposixlocksperuid;
		if (limp->rlim_max > maxposixlocksperuid)
			limp->rlim_max = maxposixlocksperuid;
		break;
	}
	/* Publish the new limit consistently with respect to readers */
	spin_lock(&limit->p_spin);
	*alimp = *limp;
	spin_unlock(&limit->p_spin);
	return (0);
}
363 
364 /*
365  * The rlimit indexed by which is returned in the second argument.
366  */
367 int
368 kern_getrlimit(u_int which, struct rlimit *limp)
369 {
370 	struct proc *p = curproc;
371 	struct plimit *limit;
372 
373 	/*
374 	 * p is NULL when kern_getrlimit is called from a
375 	 * kernel thread. In this case as the calling proc
376 	 * isn't available we just skip the limit check.
377 	 */
378 	if (p == NULL)
379 		return 0;
380 
381         if (which >= RLIM_NLIMITS)
382                 return (EINVAL);
383 
384 	limit = p->p_limit;
385         *limp = p->p_rlimit[which];
386 
387         return (0);
388 }
389 
390 /*
391  * Determine if the cpu limit has been reached and return an operations
392  * code for the caller to perform.
393  */
394 int
395 plimit_testcpulimit(struct plimit *limit, u_int64_t ttime)
396 {
397 	struct rlimit *rlim;
398 	int mode;
399 
400 	/*
401 	 * Initial tests without the spinlock.  This is the fast path.
402 	 * Any 32/64 bit glitches will fall through and retest with
403 	 * the spinlock.
404 	 */
405 	if (limit->p_cpulimit == RLIM_INFINITY)
406 		return(PLIMIT_TESTCPU_OK);
407 	if (ttime <= limit->p_cpulimit)
408 		return(PLIMIT_TESTCPU_OK);
409 
410 	if (ttime > limit->p_cpulimit) {
411 		rlim = &limit->pl_rlimit[RLIMIT_CPU];
412 		if (ttime / (rlim_t)1000000 >= rlim->rlim_max + 5)
413 			mode = PLIMIT_TESTCPU_KILL;
414 		else
415 			mode = PLIMIT_TESTCPU_XCPU;
416 	} else {
417 		mode = PLIMIT_TESTCPU_OK;
418 	}
419 
420 	return(mode);
421 }
422 
423 /*
424  * Helper routine to copy olimit to nlimit and initialize nlimit for
425  * use.  nlimit's reference count will be set to 1 and its exclusive bit
426  * will be cleared.
427  */
428 static
429 void
430 plimit_copy(struct plimit *olimit, struct plimit *nlimit)
431 {
432 	*nlimit = *olimit;
433 
434 	spin_init(&nlimit->p_spin, "plimitcopy");
435 	nlimit->p_refcnt = 1;
436 }
437 
438 /*
439  * This routine returns the value of a resource, downscaled based on
440  * the processes fork depth and chroot depth (up to 50%).  This mechanism
441  * is designed to prevent run-aways from blowing up unrelated processes
442  * running under the same UID.
443  *
444  * NOTE: Currently only applicable to RLIMIT_NPROC.  We could also limit
445  *	 file descriptors but we shouldn't have to as these are allocated
446  *	 dynamically.
447  */
448 u_int64_t
449 plimit_getadjvalue(int i)
450 {
451 	struct proc *p = curproc;
452 	struct plimit *limit;
453 	uint64_t v;
454 	uint32_t depth;
455 
456 	limit = p->p_limit;
457 	v = limit->pl_rlimit[i].rlim_cur;
458 	if (i == RLIMIT_NPROC) {
459 		/*
460 		 * 10% per chroot (around 1/3% per fork depth), with a
461 		 * maximum of 50% downscaling of the resource limit.
462 		 */
463 		depth = p->p_depth;
464 		if (depth > 32 * 5)
465 			depth = 32 * 5;
466 		v -= v * depth / 320;
467 	}
468 	return v;
469 }
470