1 /* 2 * Copyright (c) 2006,2017,2018 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 */ 34 /* 35 * Copyright (c) 1982, 1986, 1991, 1993 36 * The Regents of the University of California. All rights reserved. 37 * (c) UNIX System Laboratories, Inc. 
38 * All or some portions of this file are derived from material licensed 39 * to the University of California by American Telephone and Telegraph 40 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 41 * the permission of UNIX System Laboratories, Inc. 42 * 43 * Redistribution and use in source and binary forms, with or without 44 * modification, are permitted provided that the following conditions 45 * are met: 46 * 1. Redistributions of source code must retain the above copyright 47 * notice, this list of conditions and the following disclaimer. 48 * 2. Redistributions in binary form must reproduce the above copyright 49 * notice, this list of conditions and the following disclaimer in the 50 * documentation and/or other materials provided with the distribution. 51 * 3. Neither the name of the University nor the names of its contributors 52 * may be used to endorse or promote products derived from this software 53 * without specific prior written permission. 54 * 55 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 56 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 57 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 58 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 59 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 60 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 61 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 62 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 63 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 64 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 65 * SUCH DAMAGE. 
 *
 * @(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/resource.h>
#include <sys/spinlock.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/file.h>
#include <sys/lockf.h>
#include <sys/kern_syscall.h>
#include <sys/malloc.h>

#include <vm/vm_param.h>
#include <vm/vm.h>
#include <vm/vm_map.h>

#include <machine/pmap.h>

#include <sys/spinlock2.h>

static MALLOC_DEFINE(M_PLIMIT, "plimit", "resource limits");

static void plimit_copy(struct plimit *olimit, struct plimit *nlimit);

/*
 * Return the plimit to use for read-only access to (p)'s resource
 * limits, caching it on the calling thread (td->td_limit) so repeated
 * lookups can avoid taking p->p_spin.
 *
 * If the thread's cached pointer no longer matches p->p_limit we
 * acquire a new reference on the current structure under the shared
 * spinlock, drop the reference held on the stale cached copy, and
 * re-cache.  The returned plimit is held alive by the thread's
 * cached reference; the caller does not need to release it.
 */
static __inline
struct plimit *
readplimits(struct proc *p)
{
	thread_t td = curthread;
	struct plimit *limit;

	limit = td->td_limit;
	if (limit != p->p_limit) {
		/*
		 * Cache is stale.  Re-read p->p_limit and gain a ref
		 * under p_spin so the pointer and refcnt bump are
		 * atomic with respect to replacement.
		 */
		spin_lock_shared(&p->p_spin);
		limit = p->p_limit;
		atomic_add_int(&limit->p_refcnt, 1);
		spin_unlock_shared(&p->p_spin);
		if (td->td_limit)
			plimit_free(td->td_limit);
		td->td_limit = limit;
	}
	return limit;
}

/*
 * Initialize proc0's plimit structure.  All later plimit structures
 * are inherited through fork.
 */
void
plimit_init0(struct plimit *limit)
{
	int i;
	rlim_t lim;

	/*
	 * Start with every limit unlimited, then clamp the specific
	 * resources that have tunable system-wide maximums.
	 */
	for (i = 0; i < RLIM_NLIMITS; ++i) {
		limit->pl_rlimit[i].rlim_cur = RLIM_INFINITY;
		limit->pl_rlimit[i].rlim_max = RLIM_INFINITY;
	}
	limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur = maxfiles;
	limit->pl_rlimit[RLIMIT_NOFILE].rlim_max = maxfiles;
	limit->pl_rlimit[RLIMIT_NPROC].rlim_cur = maxproc;
	limit->pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc;

	/*
	 * RSS and MEMLOCK are bounded by currently-free physical memory;
	 * the MEMLOCK soft limit is a third of that.
	 */
	lim = ptoa((rlim_t)vmstats.v_free_count);
	limit->pl_rlimit[RLIMIT_RSS].rlim_max = lim;
	limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_max = lim;
	limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = lim / 3;
	limit->p_cpulimit = RLIM_INFINITY;
	limit->p_refcnt = 1;
	spin_init(&limit->p_spin, "plimitinit");
}

/*
 * Return a plimit for use by a new forked process given the one
 * contained in the parent process.  The parent's plimit is shared
 * copy-on-write unless it has been marked exclusive.
 */
struct plimit *
plimit_fork(struct proc *p1)
{
	struct plimit *olimit = p1->p_limit;
	struct plimit *nlimit;
	uint32_t count;

	/*
	 * Try to share the parent's plimit structure.  If we cannot, make
	 * a copy.
	 *
	 * NOTE: (count) value is field prior to increment.
	 */
	count = atomic_fetchadd_int(&olimit->p_refcnt, 1);
	cpu_ccfence();
	if (count & PLIMITF_EXCLUSIVE) {
		if ((count & PLIMITF_MASK) == 1 && p1->p_nthreads == 1) {
			/*
			 * Sole single-threaded holder: safe to demote
			 * back to shared copy-on-write mode.
			 */
			atomic_clear_int(&olimit->p_refcnt, PLIMITF_EXCLUSIVE);
		} else {
			/*
			 * Cannot share an exclusive structure; give the
			 * child a private copy (refcnt 1, non-exclusive).
			 * Our earlier ref bump on olimit remains and is
			 * owned by the parent side.
			 */
			nlimit = kmalloc(sizeof(*nlimit), M_PLIMIT, M_WAITOK);
			plimit_copy(olimit, nlimit);
			olimit = nlimit;
		}
	}
	return olimit;
}

/*
 * This routine is called when a new LWP is created for a process.  We
 * must force exclusivity to ensure that p->p_limit remains stable.
 *
 * LWPs share the same process structure so this does not bump refcnt.
 */
void
plimit_lwp_fork(struct proc *p)
{
	struct plimit *olimit = p->p_limit;
	struct plimit *nlimit;
	uint32_t count;

	count = olimit->p_refcnt;
	cpu_ccfence();
	if ((count & PLIMITF_EXCLUSIVE) == 0) {
		if (count != 1) {
			/*
			 * Shared with other processes: make a private
			 * copy before marking it exclusive so we do not
			 * pin the shared structure.
			 */
			nlimit = kmalloc(sizeof(*nlimit), M_PLIMIT, M_WAITOK);
			plimit_copy(olimit, nlimit);
			p->p_limit = nlimit;
			plimit_free(olimit);
			olimit = nlimit;
		}
		atomic_set_int(&olimit->p_refcnt, PLIMITF_EXCLUSIVE);
	}
}

/*
 * This routine is called to fixup a process's p_limit structure prior
 * to it being modified.  If index >= 0 the specified modification is also
 * made.
 *
 * This routine must make the limit structure exclusive.  If we are threaded,
 * the structure will already be exclusive.  A later fork will convert it
 * back to copy-on-write if possible.
 *
 * We can count on p->p_limit being stable since if we had created any
 * threads it will have already been made exclusive.
 */
void
plimit_modify(struct proc *p, int index, struct rlimit *rlim)
{
	struct plimit *olimit;
	struct plimit *nlimit;
	uint32_t count;

	/*
	 * Make exclusive
	 */
	olimit = p->p_limit;
	count = olimit->p_refcnt;
	cpu_ccfence();
	if ((count & PLIMITF_EXCLUSIVE) == 0) {
		if (count != 1) {
			/* shared: copy-on-write before going exclusive */
			nlimit = kmalloc(sizeof(*nlimit), M_PLIMIT, M_WAITOK);
			plimit_copy(olimit, nlimit);
			p->p_limit = nlimit;
			plimit_free(olimit);
			olimit = nlimit;
		}
		atomic_set_int(&olimit->p_refcnt, PLIMITF_EXCLUSIVE);
	}

	/*
	 * Make modification
	 */
	if (index >= 0) {
		if (p->p_nthreads == 1) {
			/* single-threaded: no concurrent readers possible */
			p->p_limit->pl_rlimit[index] = *rlim;
		} else {
			/*
			 * Other LWPs may be reading concurrently; update
			 * the two fields under the structure's spinlock.
			 */
			spin_lock(&olimit->p_spin);
			p->p_limit->pl_rlimit[index].rlim_cur = rlim->rlim_cur;
			p->p_limit->pl_rlimit[index].rlim_max = rlim->rlim_max;
			spin_unlock(&olimit->p_spin);
		}
	}
}

/*
 * Destroy a process's plimit structure.
 */
void
plimit_free(struct plimit *limit)
{
	uint32_t count;

	/* NOTE: (count) value is the refcnt prior to the decrement */
	count = atomic_fetchadd_int(&limit->p_refcnt, -1);

	if ((count & ~PLIMITF_EXCLUSIVE) == 1) {
		/* last reference; poison refcnt to catch use-after-free */
		limit->p_refcnt = -999;
		kfree(limit, M_PLIMIT);
	}
}

/*
 * Modify a resource limit (from system call).
 *
 * Validates (which), forces the process's plimit exclusive via
 * plimit_modify(), applies per-resource clamping, then commits the new
 * rlimit.  Raising a limit above the current hard maximum requires
 * PRIV_PROC_SETRLIMIT.  Returns 0 or an errno (EINVAL, or the privilege
 * check's error).
 */
int
kern_setrlimit(u_int which, struct rlimit *limp)
{
	struct proc *p = curproc;
	struct plimit *limit;
	struct rlimit *alimp;
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	/*
	 * We will be modifying a resource, make a copy if necessary.
	 */
	plimit_modify(p, -1, NULL);
	limit = p->p_limit;
	alimp = &limit->pl_rlimit[which];

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	/*
	 * Privilege is required only when raising a value above the
	 * current hard limit.  The spinlock is dropped around the
	 * privilege check itself.
	 */
	spin_lock(&limit->p_spin);
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max) {
		spin_unlock(&limit->p_spin);
		error = priv_check_cred(p->p_ucred, PRIV_PROC_SETRLIMIT, 0);
		if (error)
			return (error);
	} else {
		spin_unlock(&limit->p_spin);
	}
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;

	/*
	 * Per-resource clamping / side effects.  Resources without a
	 * case here take the common commit path below unmodified.
	 */
	switch (which) {
	case RLIMIT_CPU:
		/* p_cpulimit caches the soft limit in microseconds */
		spin_lock(&limit->p_spin);
		if (limp->rlim_cur > RLIM_INFINITY / (rlim_t)1000000)
			limit->p_cpulimit = RLIM_INFINITY;
		else
			limit->p_cpulimit = (rlim_t)1000000 * limp->rlim_cur;
		spin_unlock(&limit->p_spin);
		break;
	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If stack limit is going
		 * up make more accessible, if going down make inaccessible.
		 */
		spin_lock(&limit->p_spin);
		if (limp->rlim_cur != alimp->rlim_cur) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > alimp->rlim_cur) {
				prot = VM_PROT_ALL;
				size = limp->rlim_cur - alimp->rlim_cur;
				addr = USRSTACK - limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = alimp->rlim_cur - limp->rlim_cur;
				addr = USRSTACK - alimp->rlim_cur;
			}
			/* drop the spinlock before the vm_map operation */
			spin_unlock(&limit->p_spin);
			addr = trunc_page(addr);
			size = round_page(size);
			vm_map_protect(&p->p_vmspace->vm_map,
				       addr, addr+size, prot, FALSE);
		} else {
			spin_unlock(&limit->p_spin);
		}
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		/* a floor of 1 so the process can still fork/exec */
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	case RLIMIT_POSIXLOCKS:
		if (limp->rlim_cur > maxposixlocksperuid)
			limp->rlim_cur = maxposixlocksperuid;
		if (limp->rlim_max > maxposixlocksperuid)
			limp->rlim_max = maxposixlocksperuid;
		break;
	}

	/* commit the (possibly clamped) new limit */
	spin_lock(&limit->p_spin);
	*alimp = *limp;
	spin_unlock(&limit->p_spin);
	return (0);
}

/*
 * The rlimit indexed by which is returned in the second argument.
 */
int
kern_getrlimit(u_int which, struct rlimit *limp)
{
	struct proc *p = curproc;
	struct plimit *limit;

	/*
	 * p is NULL when kern_getrlimit is called from a
	 * kernel thread. In this case as the calling proc
	 * isn't available we just skip the limit check.
	 */
	if (p == NULL)
		return 0;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	limit = readplimits(p);
	*limp = limit->pl_rlimit[which];

	return (0);
}

/*
 * Determine if the cpu limit has been reached and return an operations
 * code for the caller to perform:
 *
 *	PLIMIT_TESTCPU_OK	- below the limit
 *	PLIMIT_TESTCPU_XCPU	- soft limit exceeded (deliver SIGXCPU)
 *	PLIMIT_TESTCPU_KILL	- hard limit (+5s grace) exceeded
 *
 * (ttime) is accumulated cpu time; p_cpulimit is in microseconds while
 * rlim_max is in seconds, hence the / 1000000 below.
 */
int
plimit_testcpulimit(struct proc *p, u_int64_t ttime)
{
	struct plimit *limit;
	struct rlimit *rlim;
	int mode;

	limit = readplimits(p);

	/*
	 * Initial tests without the spinlock.  This is the fast path.
	 * Any 32/64 bit glitches will fall through and retest with
	 * the spinlock.
	 */
	if (limit->p_cpulimit == RLIM_INFINITY)
		return(PLIMIT_TESTCPU_OK);
	if (ttime <= limit->p_cpulimit)
		return(PLIMIT_TESTCPU_OK);

	/*
	 * NOTE(review): the else branch below looks unreachable given the
	 * early return above; it appears to only trigger if p_cpulimit
	 * changed between the two unlocked reads — presumably intentional
	 * defensive re-testing, verify before simplifying.
	 */
	if (ttime > limit->p_cpulimit) {
		rlim = &limit->pl_rlimit[RLIMIT_CPU];
		if (ttime / (rlim_t)1000000 >= rlim->rlim_max + 5)
			mode = PLIMIT_TESTCPU_KILL;
		else
			mode = PLIMIT_TESTCPU_XCPU;
	} else {
		mode = PLIMIT_TESTCPU_OK;
	}

	return(mode);
}

/*
 * Helper routine to copy olimit to nlimit and initialize nlimit for
 * use.  nlimit's reference count will be set to 1 and its exclusive bit
 * will be cleared (the structure copy is overwritten by the refcnt
 * assignment; the spinlock is re-initialized rather than copied).
 */
static
void
plimit_copy(struct plimit *olimit, struct plimit *nlimit)
{
	*nlimit = *olimit;

	spin_init(&nlimit->p_spin, "plimitcopy");
	nlimit->p_refcnt = 1;
}

/*
 * This routine returns the value of a resource, downscaled based on
 * the processes fork depth and chroot depth (up to 50%).  This mechanism
 * is designed to prevent run-aways from blowing up unrelated processes
 * running under the same UID.
 *
 * NOTE: Currently only applicable to RLIMIT_NPROC.  We could also limit
 *	 file descriptors but we shouldn't have to as these are allocated
 *	 dynamically.
471 */ 472 u_int64_t 473 plimit_getadjvalue(int i) 474 { 475 struct proc *p = curproc; 476 struct plimit *limit; 477 uint64_t v; 478 uint32_t depth; 479 480 limit = p->p_limit; 481 v = limit->pl_rlimit[i].rlim_cur; 482 if (i == RLIMIT_NPROC) { 483 /* 484 * 10% per chroot (around 1/3% per fork depth), with a 485 * maximum of 50% downscaling of the resource limit. 486 */ 487 depth = p->p_depth; 488 if (depth > 32 * 5) 489 depth = 32 * 5; 490 v -= v * depth / 320; 491 } 492 return v; 493 } 494