1 /* 2 * Copyright (c) 2006,2017,2018 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 */ 34 /* 35 * Copyright (c) 1982, 1986, 1991, 1993 36 * The Regents of the University of California. All rights reserved. 37 * (c) UNIX System Laboratories, Inc. 
38 * All or some portions of this file are derived from material licensed 39 * to the University of California by American Telephone and Telegraph 40 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 41 * the permission of UNIX System Laboratories, Inc. 42 * 43 * Redistribution and use in source and binary forms, with or without 44 * modification, are permitted provided that the following conditions 45 * are met: 46 * 1. Redistributions of source code must retain the above copyright 47 * notice, this list of conditions and the following disclaimer. 48 * 2. Redistributions in binary form must reproduce the above copyright 49 * notice, this list of conditions and the following disclaimer in the 50 * documentation and/or other materials provided with the distribution. 51 * 3. Neither the name of the University nor the names of its contributors 52 * may be used to endorse or promote products derived from this software 53 * without specific prior written permission. 54 * 55 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 56 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 57 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 58 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 59 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 60 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 61 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 62 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 63 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 64 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 65 * SUCH DAMAGE. 
 *
 * @(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/resource.h>
#include <sys/spinlock.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/file.h>
#include <sys/lockf.h>
#include <sys/kern_syscall.h>

#include <vm/vm_param.h>
#include <vm/vm.h>
#include <vm/vm_map.h>

#include <machine/pmap.h>

#include <sys/spinlock2.h>

static MALLOC_DEFINE(M_PLIMIT, "plimit", "resource limits");

static void plimit_copy(struct plimit *olimit, struct plimit *nlimit);

/*
 * Return the plimit for (p), caching the pointer in the calling thread
 * (td->td_limit) so that repeated read-only lookups avoid taking
 * p->p_spin.
 *
 * If the cached pointer is missing or stale we acquire a new reference
 * on p->p_limit under the shared spinlock, drop the reference held on
 * the previously cached structure (if any), and re-cache.
 */
static __inline
struct plimit *
readplimits(struct proc *p)
{
	thread_t td = curthread;
	struct plimit *limit;

	limit = td->td_limit;
	if (limit != p->p_limit) {
		/*
		 * Stale cache: take a ref on the current structure under
		 * the shared lock, then release the old cached ref.
		 */
		spin_lock_shared(&p->p_spin);
		limit = p->p_limit;
		atomic_add_int(&limit->p_refcnt, 1);
		spin_unlock_shared(&p->p_spin);
		if (td->td_limit)
			plimit_free(td->td_limit);
		td->td_limit = limit;
	}
	return limit;
}

/*
 * Initialize proc0's plimit structure.  All later plimit structures
 * are inherited through fork.
 *
 * Every limit starts at RLIM_INFINITY and is then clamped to the
 * relevant global tunables (maxfiles, maxproc) or, for RSS/MEMLOCK,
 * to the free physical memory available at initialization time.
 */
void
plimit_init0(struct plimit *limit)
{
	int i;
	rlim_t lim;

	for (i = 0; i < RLIM_NLIMITS; ++i) {
		limit->pl_rlimit[i].rlim_cur = RLIM_INFINITY;
		limit->pl_rlimit[i].rlim_max = RLIM_INFINITY;
	}
	limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur = maxfiles;
	limit->pl_rlimit[RLIMIT_NOFILE].rlim_max = maxfiles;
	limit->pl_rlimit[RLIMIT_NPROC].rlim_cur = maxproc;
	limit->pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc;
	lim = ptoa((rlim_t)vmstats.v_free_count);
	limit->pl_rlimit[RLIMIT_RSS].rlim_max = lim;
	limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_max = lim;
	/* soft MEMLOCK limit defaults to one third of free memory */
	limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = lim / 3;
	limit->p_cpulimit = RLIM_INFINITY;
	limit->p_refcnt = 1;
	spin_init(&limit->p_spin, "plimitinit");
}

/*
 * Return a plimit for use by a new forked process given the one
 * contained in the parent process.
139 */ 140 struct plimit * 141 plimit_fork(struct proc *p1) 142 { 143 struct plimit *olimit = p1->p_limit; 144 struct plimit *nlimit; 145 uint32_t count; 146 147 /* 148 * Try to share the parent's plimit structure. If we cannot, make 149 * a copy. 150 * 151 * NOTE: (count) value is field prior to increment. 152 */ 153 count = atomic_fetchadd_int(&olimit->p_refcnt, 1); 154 cpu_ccfence(); 155 if (count & PLIMITF_EXCLUSIVE) { 156 if ((count & PLIMITF_MASK) == 1 && p1->p_nthreads == 1) { 157 atomic_clear_int(&olimit->p_refcnt, PLIMITF_EXCLUSIVE); 158 } else { 159 nlimit = kmalloc(sizeof(*nlimit), M_PLIMIT, M_WAITOK); 160 plimit_copy(olimit, nlimit); 161 olimit = nlimit; 162 } 163 } 164 return olimit; 165 } 166 167 /* 168 * This routine is called when a new LWP is created for a process. We 169 * must force exclusivity to ensure that p->p_limit remains stable. 170 * 171 * LWPs share the same process structure so this does not bump refcnt. 172 */ 173 void 174 plimit_lwp_fork(struct proc *p) 175 { 176 struct plimit *olimit = p->p_limit; 177 struct plimit *nlimit; 178 uint32_t count; 179 180 count = olimit->p_refcnt; 181 cpu_ccfence(); 182 if ((count & PLIMITF_EXCLUSIVE) == 0) { 183 if (count != 1) { 184 nlimit = kmalloc(sizeof(*nlimit), M_PLIMIT, M_WAITOK); 185 plimit_copy(olimit, nlimit); 186 p->p_limit = nlimit; 187 plimit_free(olimit); 188 olimit = nlimit; 189 } 190 atomic_set_int(&olimit->p_refcnt, PLIMITF_EXCLUSIVE); 191 } 192 } 193 194 /* 195 * This routine is called to fixup a process's p_limit structure prior 196 * to it being modified. If index >= 0 the specified modification is also 197 * made. 198 * 199 * This routine must make the limit structure exclusive. If we are threaded, 200 * the structure will already be exclusive. A later fork will convert it 201 * back to copy-on-write if possible. 202 * 203 * We can count on p->p_limit being stable since if we had created any 204 * threads it will have already been made exclusive. 
 */
void
plimit_modify(struct proc *p, int index, struct rlimit *rlim)
{
	struct plimit *olimit;
	struct plimit *nlimit;
	uint32_t count;

	/*
	 * Make exclusive.  If the structure is shared (refcnt > 1) we
	 * must replace it with a private copy first; the plimit_free()
	 * drops the ref p->p_limit held on the old structure.
	 */
	olimit = p->p_limit;
	count = olimit->p_refcnt;
	cpu_ccfence();
	if ((count & PLIMITF_EXCLUSIVE) == 0) {
		if (count != 1) {
			nlimit = kmalloc(sizeof(*nlimit), M_PLIMIT, M_WAITOK);
			plimit_copy(olimit, nlimit);
			p->p_limit = nlimit;
			plimit_free(olimit);
			olimit = nlimit;
		}
		atomic_set_int(&olimit->p_refcnt, PLIMITF_EXCLUSIVE);
	}

	/*
	 * Make the requested modification.  Single-threaded processes
	 * can copy the whole rlimit directly; otherwise take the plimit
	 * spinlock to keep concurrent readers consistent.
	 */
	if (index >= 0) {
		if (p->p_nthreads == 1) {
			p->p_limit->pl_rlimit[index] = *rlim;
		} else {
			spin_lock(&olimit->p_spin);
			p->p_limit->pl_rlimit[index].rlim_cur = rlim->rlim_cur;
			p->p_limit->pl_rlimit[index].rlim_max = rlim->rlim_max;
			spin_unlock(&olimit->p_spin);
		}
	}
}

/*
 * Destroy a process's plimit structure.
 *
 * Drops one reference; frees the structure when the last reference
 * (ignoring the PLIMITF_EXCLUSIVE flag bit) goes away.  The refcnt is
 * poisoned with -999 to help catch use-after-free.
 */
void
plimit_free(struct plimit *limit)
{
	uint32_t count;

	count = atomic_fetchadd_int(&limit->p_refcnt, -1);

	/* (count) is the value prior to decrement */
	if ((count & ~PLIMITF_EXCLUSIVE) == 1) {
		limit->p_refcnt = -999;
		kfree(limit, M_PLIMIT);
	}
}

/*
 * Modify a resource limit (from system call).
 *
 * Validates (which), clamps the requested values against system
 * maximums, requires PRIV_PROC_SETRLIMIT to raise a limit above the
 * current hard limit, and applies per-resource side effects (CPU
 * limit conversion to microseconds, stack protection adjustment).
 *
 * Returns 0 on success or an errno (EINVAL, or a priv_check error).
 */
int
kern_setrlimit(u_int which, struct rlimit *limp)
{
	struct proc *p = curproc;
	struct plimit *limit;
	struct rlimit *alimp;
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	/*
	 * We will be modifying a resource, make a copy if necessary.
	 */
	plimit_modify(p, -1, NULL);
	limit = p->p_limit;
	alimp = &limit->pl_rlimit[which];

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	/*
	 * Raising either value above the current hard limit requires
	 * privilege.
	 */
	spin_lock(&limit->p_spin);
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max) {
		spin_unlock(&limit->p_spin);
		error = priv_check_cred(p->p_ucred, PRIV_PROC_SETRLIMIT, 0);
		if (error)
			return (error);
	} else {
		spin_unlock(&limit->p_spin);
	}
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;

	switch (which) {
	case RLIMIT_CPU:
		/*
		 * p_cpulimit is kept in microseconds; guard the
		 * multiply against overflow.
		 */
		spin_lock(&limit->p_spin);
		if (limp->rlim_cur > RLIM_INFINITY / (rlim_t)1000000)
			limit->p_cpulimit = RLIM_INFINITY;
		else
			limit->p_cpulimit = (rlim_t)1000000 * limp->rlim_cur;
		spin_unlock(&limit->p_spin);
		break;
	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If stack limit is going
		 * up make more accessible, if going down make inaccessible.
		 */
		spin_lock(&limit->p_spin);
		if (limp->rlim_cur != alimp->rlim_cur) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > alimp->rlim_cur) {
				prot = VM_PROT_ALL;
				size = limp->rlim_cur - alimp->rlim_cur;
				addr = USRSTACK - limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = alimp->rlim_cur - limp->rlim_cur;
				addr = USRSTACK - alimp->rlim_cur;
			}
			/* drop the spinlock before calling into the VM map */
			spin_unlock(&limit->p_spin);
			addr = trunc_page(addr);
			size = round_page(size);
			vm_map_protect(&p->p_vmspace->vm_map,
				       addr, addr+size, prot, FALSE);
		} else {
			spin_unlock(&limit->p_spin);
		}
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		/* a floor of 1 keeps the process able to at least exist */
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	case RLIMIT_POSIXLOCKS:
		if (limp->rlim_cur > maxposixlocksperuid)
			limp->rlim_cur = maxposixlocksperuid;
		if (limp->rlim_max > maxposixlocksperuid)
			limp->rlim_max = maxposixlocksperuid;
		break;
	}
	spin_lock(&limit->p_spin);
	*alimp = *limp;
	spin_unlock(&limit->p_spin);
	return (0);
}

/*
 * The rlimit indexed by which is returned in the second argument.
 *
 * Returns 0 on success (including the kernel-thread case, where *limp
 * is left untouched) or EINVAL for an out-of-range index.
 */
int
kern_getrlimit(u_int which, struct rlimit *limp)
{
	struct proc *p = curproc;
	struct plimit *limit;

	/*
	 * p is NULL when kern_getrlimit is called from a
	 * kernel thread. In this case as the calling proc
	 * isn't available we just skip the limit check.
	 */
	if (p == NULL)
		return 0;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	limit = readplimits(p);
	*limp = limit->pl_rlimit[which];

	return (0);
}

/*
 * Determine if the cpu limit has been reached and return an operations
 * code for the caller to perform.
 *
 * (ttime) is accumulated cpu time; p_cpulimit and the comparison base
 * here are in microseconds while rlim_max is in seconds, hence the
 * division by 1000000.  Exceeding the hard limit by 5 or more seconds
 * requests a kill, otherwise an XCPU signal.
 */
int
plimit_testcpulimit(struct proc *p, u_int64_t ttime)
{
	struct plimit *limit;
	struct rlimit *rlim;
	int mode;

	limit = readplimits(p);

	/*
	 * Initial tests without the spinlock.  This is the fast path.
	 * Any 32/64 bit glitches will fall through and retest with
	 * the spinlock.
	 *
	 * NOTE(review): no spinlock retest actually occurs below, and
	 * the (ttime > p_cpulimit) test is always true once the two
	 * early returns are passed — confirm whether the lock-based
	 * retest was intended.
	 */
	if (limit->p_cpulimit == RLIM_INFINITY)
		return(PLIMIT_TESTCPU_OK);
	if (ttime <= limit->p_cpulimit)
		return(PLIMIT_TESTCPU_OK);

	if (ttime > limit->p_cpulimit) {
		rlim = &limit->pl_rlimit[RLIMIT_CPU];
		if (ttime / (rlim_t)1000000 >= rlim->rlim_max + 5)
			mode = PLIMIT_TESTCPU_KILL;
		else
			mode = PLIMIT_TESTCPU_XCPU;
	} else {
		mode = PLIMIT_TESTCPU_OK;
	}

	return(mode);
}

/*
 * Helper routine to copy olimit to nlimit and initialize nlimit for
 * use.  nlimit's reference count will be set to 1 and its exclusive bit
 * will be cleared (the structure copy would otherwise carry over the
 * source's refcnt and flags).
 */
static
void
plimit_copy(struct plimit *olimit, struct plimit *nlimit)
{
	*nlimit = *olimit;

	spin_init(&nlimit->p_spin, "plimitcopy");
	nlimit->p_refcnt = 1;
}

/*
 * This routine returns the value of a resource, downscaled based on
 * the processes fork depth and chroot depth (up to 50%).  This mechanism
 * is designed to prevent run-aways from blowing up unrelated processes
 * running under the same UID.
 *
 * NOTE: Currently only applicable to RLIMIT_NPROC.  We could also limit
 *	 file descriptors but we shouldn't have to as these are allocated
 *	 dynamically.
470 */ 471 u_int64_t 472 plimit_getadjvalue(int i) 473 { 474 struct proc *p = curproc; 475 struct plimit *limit; 476 uint64_t v; 477 uint32_t depth; 478 479 limit = p->p_limit; 480 v = limit->pl_rlimit[i].rlim_cur; 481 if (i == RLIMIT_NPROC) { 482 /* 483 * 10% per chroot (around 1/3% per fork depth), with a 484 * maximum of 50% downscaling of the resource limit. 485 */ 486 depth = p->p_depth; 487 if (depth > 32 * 5) 488 depth = 32 * 5; 489 v -= v * depth / 320; 490 } 491 return v; 492 } 493