/*	$OpenBSD: kern_exec.c,v 1.228 2021/12/09 00:26:10 guenther Exp $	*/
/*	$NetBSD: kern_exec.c,v 1.75 1996/02/09 18:59:28 christos Exp $	*/

/*-
 * Copyright (C) 1993, 1994 Christopher G. Demetriou
 * Copyright (C) 1992 Wolfgang Solfrank.
 * Copyright (C) 1992 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/acct.h>
#include <sys/exec.h>
#include <sys/exec_elf.h>
#include <sys/ktrace.h>
#include <sys/resourcevar.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/pledge.h>
#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>
#include <machine/tcb.h>

#include <sys/timetc.h>

struct uvm_object *sigobject;		/* shared sigcode object */
struct uvm_object *timekeep_object;
struct timekeep *timekeep;

void	unveil_destroy(struct process *ps);

const struct kmem_va_mode kv_exec = {
	.kv_wait = 1,
	.kv_map = &exec_map
};

/*
 * Map the shared signal code.
 */
int	exec_sigcode_map(struct process *);

/*
 * Map the shared timekeep page.
 */
int	exec_timekeep_map(struct process *);

/*
 * If non-zero, stackgap_random specifies the upper limit of the random gap
 * size added to the fixed stack position.  Must be a power of 2.
 */
int stackgap_random = STACKGAP_RANDOM;

/*
 * check exec:
 * given an "executable" described in the exec package's namei info,
 * see what we can do with it.
 *
 * ON ENTRY:
 *	exec package with appropriate namei info
 *	proc pointer of exec'ing proc
 *	NO SELF-LOCKED VNODES
 *
 * ON EXIT:
 *	error:	nothing held, etc.  exec header still allocated.
 *	ok:	filled exec package, one locked vnode.
 *
 * EXEC SWITCH ENTRY:
 *	Locked vnode to check, exec package, proc.
 *
 * EXEC SWITCH EXIT:
 *	ok:	return 0, filled exec package, one locked vnode.
 *	error:	destructive:
 *			everything deallocated except exec header.
 *		non-destructive:
 *			error code, locked vnode, exec header unmodified
 */
int
check_exec(struct proc *p, struct exec_package *epp)
{
	int error, i;
	struct vnode *vp;
	struct nameidata *ndp;
	size_t resid;

	ndp = epp->ep_ndp;
	ndp->ni_cnd.cn_nameiop = LOOKUP;
	ndp->ni_cnd.cn_flags = FOLLOW | LOCKLEAF | SAVENAME;
	if (epp->ep_flags & EXEC_INDIR)
		ndp->ni_cnd.cn_flags |= BYPASSUNVEIL;
	/* first get the vnode */
	if ((error = namei(ndp)) != 0)
		return (error);
	epp->ep_vp = vp = ndp->ni_vp;

	/* check for regular file */
	if (vp->v_type != VREG) {
		error = EACCES;
		goto bad1;
	}

	/* get attributes */
	if ((error = VOP_GETATTR(vp, epp->ep_vap, p->p_ucred, p)) != 0)
		goto bad1;

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto bad1;
	}

	/* SUID programs may not be started with execpromises */
	if ((epp->ep_vap->va_mode & (VSUID | VSGID)) &&
	    (p->p_p->ps_flags & PS_EXECPLEDGE)) {
		error = EACCES;
		goto bad1;
	}

	if ((vp->v_mount->mnt_flag & MNT_NOSUID))
		epp->ep_vap->va_mode &= ~(VSUID | VSGID);

	/* check access.  for root we have to see if any exec bit on */
	if ((error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p)) != 0)
		goto bad1;
	if ((epp->ep_vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
		error = EACCES;
		goto bad1;
	}

	/* try to open it */
	if ((error = VOP_OPEN(vp, FREAD, p->p_ucred, p)) != 0)
		goto bad1;

	/* unlock vp, we need it unlocked from here */
	VOP_UNLOCK(vp);

	/* now we have the file, get the exec header */
	error = vn_rdwr(UIO_READ, vp, epp->ep_hdr, epp->ep_hdrlen, 0,
	    UIO_SYSSPACE, 0, p->p_ucred, &resid, p);
	if (error)
		goto bad2;
	epp->ep_hdrvalid = epp->ep_hdrlen - resid;

	/*
	 * set up the vmcmds for creation of the process
	 * address space
	 */
	error = ENOEXEC;
	for (i = 0; i < nexecs && error != 0; i++) {
		int newerror;

		if (execsw[i].es_check == NULL)
			continue;
		newerror = (*execsw[i].es_check)(p, epp);
		/* make sure the first "interesting" error code is saved. */
		if (!newerror || error == ENOEXEC)
			error = newerror;
		if (epp->ep_flags & EXEC_DESTR && error != 0)
			return (error);
	}
	if (!error) {
		/* check that entry point is sane */
		if (epp->ep_entry > VM_MAXUSER_ADDRESS) {
			error = ENOEXEC;
		}

		/* check limits */
		if ((epp->ep_tsize > MAXTSIZ) ||
		    (epp->ep_dsize > lim_cur(RLIMIT_DATA)))
			error = ENOMEM;

		if (!error)
			return (0);
	}

	/*
	 * free any vmspace-creation commands,
	 * and release their references
	 */
	kill_vmcmds(&epp->ep_vmcmds);

bad2:
	/*
	 * close the vnode, free the pathname buf, and punt.
	 */
	vn_close(vp, FREAD, p->p_ucred, p);
	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
	return (error);

bad1:
	/*
	 * free the namei pathname buffer, and put the vnode
	 * (which we don't yet have open).
	 */
	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
	vput(vp);
	return (error);
}

/*
 * exec system call
 */
int
sys_execve(struct proc *p, void *v, register_t *retval)
{
	struct sys_execve_args /* {
		syscallarg(const char *) path;
		syscallarg(char *const *) argp;
		syscallarg(char *const *) envp;
	} */ *uap = v;
	int error;
	struct exec_package pack;
	struct nameidata nid;
	struct vattr attr;
	struct ucred *cred = p->p_ucred;
	char *argp;
	char * const *cpp, *dp, *sp;
#ifdef KTRACE
	char *env_start;
#endif
	struct process *pr = p->p_p;
	long argc, envc;
	size_t len, sgap, dstsize;
#ifdef MACHINE_STACK_GROWS_UP
	size_t slen;
#endif
	char *stack;
	struct ps_strings arginfo;
	struct vmspace *vm;
	struct vnode *otvp;

	/* get other threads to stop */
	if ((error = single_thread_set(p, SINGLE_UNWIND, 1)))
		return (error);

	/*
	 * Cheap solution to complicated problems.
	 * Mark this process as "leave me alone, I'm execing".
	 */
	atomic_setbits_int(&pr->ps_flags, PS_INEXEC);

	NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
	nid.ni_pledge = PLEDGE_EXEC;
	nid.ni_unveil = UNVEIL_EXEC;

	/*
	 * initialize the fields of the exec package.
	 */
	pack.ep_name = (char *)SCARG(uap, path);
	pack.ep_hdr = malloc(exec_maxhdrsz, M_EXEC, M_WAITOK);
	pack.ep_hdrlen = exec_maxhdrsz;
	pack.ep_hdrvalid = 0;
	pack.ep_ndp = &nid;
	pack.ep_interp = NULL;
	pack.ep_args = NULL;
	pack.ep_auxinfo = NULL;
	VMCMDSET_INIT(&pack.ep_vmcmds);
	pack.ep_vap = &attr;
	pack.ep_flags = 0;

	/* see if we can run it. */
	if ((error = check_exec(p, &pack)) != 0) {
		goto freehdr;
	}

	/* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */

	/* allocate an argument buffer */
	argp = km_alloc(NCARGS, &kv_exec, &kp_pageable, &kd_waitok);
#ifdef DIAGNOSTIC
	if (argp == NULL)
		panic("execve: argp == NULL");
#endif
	dp = argp;
	argc = 0;

	/*
	 * Copy the fake args list, if there's one, freeing it as we go.
	 * exec_script_makecmds() allocates either 2 or 3 fake args bounded
	 * by MAXINTERP + MAXPATHLEN < NCARGS so no overflow can happen.
	 */
	if (pack.ep_flags & EXEC_HASARGL) {
		dstsize = NCARGS;
		for(; pack.ep_fa[argc] != NULL; argc++) {
			len = strlcpy(dp, pack.ep_fa[argc], dstsize);
			len++;
			dp += len; dstsize -= len;
			if (pack.ep_fa[argc+1] != NULL)
				free(pack.ep_fa[argc], M_EXEC, len);
			else
				free(pack.ep_fa[argc], M_EXEC, MAXPATHLEN);
		}
		free(pack.ep_fa, M_EXEC, 4 * sizeof(char *));
		pack.ep_flags &= ~EXEC_HASARGL;
	}

	/* Now get argv & environment */
	if (!(cpp = SCARG(uap, argp))) {
		error = EFAULT;
		goto bad;
	}

	if (pack.ep_flags & EXEC_SKIPARG)
		cpp++;

	while (1) {
		len = argp + ARG_MAX - dp;
		if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
			goto bad;
		if (!sp)
			break;
		if ((error = copyinstr(sp, dp, len, &len)) != 0) {
			if (error == ENAMETOOLONG)
				error = E2BIG;
			goto bad;
		}
		dp += len;
		cpp++;
		argc++;
	}

	/* must have at least one argument */
	if (argc == 0) {
		error = EINVAL;
		goto bad;
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_EXECARGS))
		ktrexec(p, KTR_EXECARGS, argp, dp - argp);
#endif

	envc = 0;
	/* environment does not need to be there */
	if ((cpp = SCARG(uap, envp)) != NULL ) {
#ifdef KTRACE
		env_start = dp;
#endif
		while (1) {
			len = argp + ARG_MAX - dp;
			if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
				goto bad;
			if (!sp)
				break;
			if ((error = copyinstr(sp, dp, len, &len)) != 0) {
				if (error == ENAMETOOLONG)
					error = E2BIG;
				goto bad;
			}
			dp += len;
			cpp++;
			envc++;
		}

#ifdef KTRACE
		if (KTRPOINT(p, KTR_EXECENV))
			ktrexec(p, KTR_EXECENV, env_start, dp - env_start);
#endif
	}

	dp = (char *)(((long)dp + _STACKALIGNBYTES) & ~_STACKALIGNBYTES);

	sgap = STACKGAPLEN;

	/*
	 * If we have enabled random stackgap, the stack itself has already
	 * been moved from a random location, but is still aligned to a page
	 * boundary.  Provide the lower bits of random placement now.
	 */
	if (stackgap_random != 0) {
		sgap += arc4random() & PAGE_MASK;
		sgap = (sgap + _STACKALIGNBYTES) & ~_STACKALIGNBYTES;
	}

	/* Now check if args & environ fit into new stack */
	len = ((argc + envc + 2 + ELF_AUX_WORDS) * sizeof(char *) +
	    sizeof(long) + dp + sgap + sizeof(struct ps_strings)) - argp;

	len = (len + _STACKALIGNBYTES) &~ _STACKALIGNBYTES;

	if (len > pack.ep_ssize) {	/* in effect, compare to initial limit */
		error = ENOMEM;
		goto bad;
	}

	/* adjust "active stack depth" for process VSZ */
	pack.ep_ssize = len;	/* maybe should go elsewhere, but... */

	/*
	 * we're committed: any further errors will kill the process, so
	 * kill the other threads now.
	 */
	single_thread_set(p, SINGLE_EXIT, 1);

	/*
	 * Prepare vmspace for remapping. Note that uvmspace_exec can replace
	 * ps_vmspace!
	 */
	uvmspace_exec(p, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);

	vm = pr->ps_vmspace;
	/* Now map address space */
	vm->vm_taddr = (char *)trunc_page(pack.ep_taddr);
	vm->vm_tsize = atop(round_page(pack.ep_taddr + pack.ep_tsize) -
	    trunc_page(pack.ep_taddr));
	vm->vm_daddr = (char *)trunc_page(pack.ep_daddr);
	vm->vm_dsize = atop(round_page(pack.ep_daddr + pack.ep_dsize) -
	    trunc_page(pack.ep_daddr));
	vm->vm_dused = 0;
	vm->vm_ssize = atop(round_page(pack.ep_ssize));
	vm->vm_maxsaddr = (char *)pack.ep_maxsaddr;
	vm->vm_minsaddr = (char *)pack.ep_minsaddr;

	/* create the new process's VM space by running the vmcmds */
#ifdef DIAGNOSTIC
	if (pack.ep_vmcmds.evs_used == 0)
		panic("execve: no vmcmds");
#endif
	error = exec_process_vmcmds(p, &pack);

	/* if an error happened, deallocate and punt */
	if (error)
		goto exec_abort;

#ifdef MACHINE_STACK_GROWS_UP
	pr->ps_strings = (vaddr_t)vm->vm_maxsaddr + sgap;
	if (uvm_map_protect(&vm->vm_map, (vaddr_t)vm->vm_maxsaddr,
	    trunc_page(pr->ps_strings), PROT_NONE, TRUE))
		goto exec_abort;
#else
	pr->ps_strings = (vaddr_t)vm->vm_minsaddr - sizeof(arginfo) - sgap;
	if (uvm_map_protect(&vm->vm_map,
	    round_page(pr->ps_strings + sizeof(arginfo)),
	    (vaddr_t)vm->vm_minsaddr, PROT_NONE, TRUE))
		goto exec_abort;
#endif

	memset(&arginfo, 0, sizeof(arginfo));

	/* remember information about the process */
	arginfo.ps_nargvstr = argc;
	arginfo.ps_nenvstr = envc;

#ifdef MACHINE_STACK_GROWS_UP
	stack = (char *)vm->vm_maxsaddr + sizeof(arginfo) + sgap;
	slen = len - sizeof(arginfo) - sgap;
#else
	stack = (char *)(vm->vm_minsaddr - len);
#endif
	/* Now copy argc, args & environ to new stack */
	if (!copyargs(&pack, &arginfo, stack, argp))
		goto exec_abort;

	/* copy out the process's ps_strings structure */
	if (copyout(&arginfo, (char *)pr->ps_strings, sizeof(arginfo)))
		goto exec_abort;

	stopprofclock(pr);	/* stop profiling */
	fdcloseexec(p);		/* handle close on exec */
	execsigs(p);		/* reset caught signals */
	TCB_SET(p, NULL);	/* reset the TCB address */
	pr->ps_kbind_addr = 0;	/* reset the kbind bits */
	pr->ps_kbind_cookie = 0;
	arc4random_buf(&pr->ps_sigcookie, sizeof pr->ps_sigcookie);

	/* set command name & other accounting info */
	memset(pr->ps_comm, 0, sizeof(pr->ps_comm));
	len = min(nid.ni_cnd.cn_namelen, MAXCOMLEN);
	memcpy(pr->ps_comm, nid.ni_cnd.cn_nameptr, len);
	pr->ps_acflag &= ~AFORK;

	/* record proc's vnode, for use by sysctl */
	otvp = pr->ps_textvp;
	vref(pack.ep_vp);
	pr->ps_textvp = pack.ep_vp;
	if (otvp)
		vrele(otvp);

	atomic_setbits_int(&pr->ps_flags, PS_EXEC);
	if (pr->ps_flags & PS_PPWAIT) {
		atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
		atomic_clearbits_int(&pr->ps_pptr->ps_flags, PS_ISPWAIT);
		wakeup(pr->ps_pptr);
	}

	/*
	 * If process does execve() while it has a mismatched real,
	 * effective, or saved uid/gid, we set PS_SUGIDEXEC.
	 */
	if (cred->cr_uid != cred->cr_ruid ||
	    cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_rgid ||
	    cred->cr_gid != cred->cr_svgid)
		atomic_setbits_int(&pr->ps_flags, PS_SUGIDEXEC);
	else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGIDEXEC);

	if (pr->ps_flags & PS_EXECPLEDGE) {
		pr->ps_pledge = pr->ps_execpledge;
		atomic_setbits_int(&pr->ps_flags, PS_PLEDGE);
	} else {
		atomic_clearbits_int(&pr->ps_flags, PS_PLEDGE);
		pr->ps_pledge = 0;
		/* XXX XXX XXX XXX */
		/* Clear our unveil paths out so the child
		 * starts afresh
		 */
		unveil_destroy(pr);
		pr->ps_uvdone = 0;
	}

	/*
	 * deal with set[ug]id.
	 * MNT_NOEXEC has already been used to disable s[ug]id.
	 */
	if ((attr.va_mode & (VSUID | VSGID)) && proc_cansugid(p)) {
		int i;

		atomic_setbits_int(&pr->ps_flags, PS_SUGID|PS_SUGIDEXEC);

#ifdef KTRACE
		/*
		 * If process is being ktraced, turn off - unless
		 * root set it.
		 */
		if (pr->ps_tracevp && !(pr->ps_traceflag & KTRFAC_ROOT))
			ktrcleartrace(pr);
#endif
		p->p_ucred = cred = crcopy(cred);
		if (attr.va_mode & VSUID)
			cred->cr_uid = attr.va_uid;
		if (attr.va_mode & VSGID)
			cred->cr_gid = attr.va_gid;

		/*
		 * For set[ug]id processes, a few caveats apply to
		 * stdin, stdout, and stderr.
		 */
		error = 0;
		fdplock(p->p_fd);
		for (i = 0; i < 3; i++) {
			struct file *fp = NULL;

			/*
			 * NOTE - This will never return NULL because of
			 * immature fds. The file descriptor table is not
			 * shared because we're suid.
			 */
			fp = fd_getfile(p->p_fd, i);

			/*
			 * Ensure that stdin, stdout, and stderr are already
			 * allocated.  We do not want userland to accidentally
			 * allocate descriptors in this range which has implied
			 * meaning to libc.
			 */
			if (fp == NULL) {
				short flags = FREAD | (i == 0 ? 0 : FWRITE);
				struct vnode *vp;
				int indx;

				if ((error = falloc(p, &fp, &indx)) != 0)
					break;
#ifdef DIAGNOSTIC
				if (indx != i)
					panic("sys_execve: falloc indx != i");
#endif
				if ((error = cdevvp(getnulldev(), &vp)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					break;
				}
				if ((error = VOP_OPEN(vp, flags, cred, p)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					vrele(vp);
					break;
				}
				if (flags & FWRITE)
					vp->v_writecount++;
				fp->f_flag = flags;
				fp->f_type = DTYPE_VNODE;
				fp->f_ops = &vnops;
				fp->f_data = (caddr_t)vp;
				fdinsert(p->p_fd, indx, 0, fp);
			}
			FRELE(fp, p);
		}
		fdpunlock(p->p_fd);
		if (error)
			goto exec_abort;
	} else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGID);

	/*
	 * Reset the saved ugids and update the process's copy of the
	 * creds if the creds have been changed
	 */
	if (cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_svgid) {
		/* make sure we have unshared ucreds */
		p->p_ucred = cred = crcopy(cred);
		cred->cr_svuid = cred->cr_uid;
		cred->cr_svgid = cred->cr_gid;
	}

	if (pr->ps_ucred != cred) {
		struct ucred *ocred;

		ocred = pr->ps_ucred;
		crhold(cred);
		pr->ps_ucred = cred;
		crfree(ocred);
	}

	if (pr->ps_flags & PS_SUGIDEXEC) {
		cancel_all_itimers();
	}

	/* reset CPU time usage for the thread, but not the process */
	timespecclear(&p->p_tu.tu_runtime);
	p->p_tu.tu_uticks = p->p_tu.tu_sticks = p->p_tu.tu_iticks = 0;

	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);

	/*
	 * notify others that we exec'd
	 */
	KNOTE(&pr->ps_klist, NOTE_EXEC);

	/* map the process's timekeep page, needs to be before exec_elf_fixup */
	if (exec_timekeep_map(pr))
		goto free_pack_abort;

	/* setup new registers and do misc. setup. */
	if (exec_elf_fixup(p, &pack) != 0)
		goto free_pack_abort;
#ifdef MACHINE_STACK_GROWS_UP
	setregs(p, &pack, (u_long)stack + slen, retval);
#else
	setregs(p, &pack, (u_long)stack, retval);
#endif

	/* map the process's signal trampoline code */
	if (exec_sigcode_map(pr))
		goto free_pack_abort;

#ifdef __HAVE_EXEC_MD_MAP
	/* perform md specific mappings that process might need */
	if (exec_md_map(p, &pack))
		goto free_pack_abort;
#endif

	if (pr->ps_flags & PS_TRACED)
		psignal(p, SIGTRAP);

	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);

	p->p_descfd = 255;
	if ((pack.ep_flags & EXEC_HASFD) && pack.ep_fd < 255)
		p->p_descfd = pack.ep_fd;

	if (pack.ep_flags & EXEC_WXNEEDED)
		atomic_setbits_int(&p->p_p->ps_flags, PS_WXNEEDED);
	else
		atomic_clearbits_int(&p->p_p->ps_flags, PS_WXNEEDED);

	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

	return (0);

bad:
	/* free the vmspace-creation commands, and release their references */
	kill_vmcmds(&pack.ep_vmcmds);
	/* kill any opened file descriptor, if necessary */
	if (pack.ep_flags & EXEC_HASFD) {
		pack.ep_flags &= ~EXEC_HASFD;
		fdplock(p->p_fd);
		/* fdrelease unlocks p->p_fd. */
		(void) fdrelease(p, pack.ep_fd);
	}
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	free(pack.ep_args, M_TEMP, sizeof *pack.ep_args);
	/* close and put the exec'd file */
	vn_close(pack.ep_vp, FREAD, cred, p);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

freehdr:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

	return (error);

exec_abort:
	/*
	 * the old process doesn't exist anymore.  exit gracefully.
	 * get rid of the (new) address space we have created, if any, get rid
	 * of our namei data and vnode, and exit noting failure
	 */
	uvm_unmap(&vm->vm_map, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	free(pack.ep_args, M_TEMP, sizeof *pack.ep_args);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

free_pack_abort:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
	exit1(p, 0, SIGABRT, EXIT_NORMAL);

	/* NOTREACHED */
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);

	return (0);
}


int
copyargs(struct exec_package *pack, struct ps_strings *arginfo, void *stack,
    void *argp)
{
	char **cpp = stack;
	char *dp, *sp;
	size_t len;
	void *nullp = NULL;
	long argc = arginfo->ps_nargvstr;
	int envc = arginfo->ps_nenvstr;

	if (copyout(&argc, cpp++, sizeof(argc)))
		return (0);

	dp = (char *) (cpp + argc + envc + 2 + ELF_AUX_WORDS);
	sp = argp;

	/* XXX don't copy them out, remap them! */
	arginfo->ps_argvstr = cpp; /* remember location of argv for later */

	for (; --argc >= 0; sp += len, dp += len)
		if (copyout(&dp, cpp++, sizeof(dp)) ||
		    copyoutstr(sp, dp, ARG_MAX, &len))
			return (0);

	if (copyout(&nullp, cpp++, sizeof(nullp)))
		return (0);

	arginfo->ps_envstr = cpp; /* remember location of envp for later */

	for (; --envc >= 0; sp += len, dp += len)
		if (copyout(&dp, cpp++, sizeof(dp)) ||
		    copyoutstr(sp, dp, ARG_MAX, &len))
			return (0);

	if (copyout(&nullp, cpp++, sizeof(nullp)))
		return (0);

	/* if this process needs auxinfo, note where to place it */
	if (pack->ep_args != NULL)
		pack->ep_auxinfo = cpp;

	return (1);
}

int
exec_sigcode_map(struct process *pr)
{
	extern char sigcode[], esigcode[], sigcoderet[];
	vsize_t sz;

	sz = (vaddr_t)esigcode - (vaddr_t)sigcode;

	/*
	 * If we don't have a sigobject yet, create one.
	 *
	 * sigobject is an anonymous memory object (just like SYSV shared
	 * memory) that we keep a permanent reference to and that we map
	 * in all processes that need this sigcode. The creation is simple,
	 * we create an object, add a permanent reference to it, map it in
	 * kernel space, copy out the sigcode to it and unmap it.
	 * Then we map it with PROT_READ|PROT_EXEC into the process just
	 * the way sys_mmap would map it.
	 */
	if (sigobject == NULL) {
		extern int sigfillsiz;
		extern u_char sigfill[];
		size_t off, left;
		vaddr_t va;
		int r;

		sigobject = uao_create(sz, 0);
		uao_reference(sigobject);	/* permanent reference */

		if ((r = uvm_map(kernel_map, &va, round_page(sz), sigobject,
		    0, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
		    MAP_INHERIT_SHARE, MADV_RANDOM, 0)))) {
			uao_detach(sigobject);
			return (ENOMEM);
		}

		for (off = 0, left = round_page(sz); left != 0;
		    off += sigfillsiz) {
			size_t chunk = ulmin(left, sigfillsiz);
			memcpy((caddr_t)va + off, sigfill, chunk);
			left -= chunk;
		}
		memcpy((caddr_t)va, sigcode, sz);
		uvm_unmap(kernel_map, va, va + round_page(sz));
	}

	pr->ps_sigcode = 0; /* no hint */
	uao_reference(sigobject);
	if (uvm_map(&pr->ps_vmspace->vm_map, &pr->ps_sigcode, round_page(sz),
	    sigobject, 0, 0, UVM_MAPFLAG(PROT_READ | PROT_EXEC,
	    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_INHERIT_COPY,
	    MADV_RANDOM, UVM_FLAG_COPYONW | UVM_FLAG_SYSCALL))) {
		uao_detach(sigobject);
		return (ENOMEM);
	}

	/* Calculate PC at point of sigreturn entry */
	pr->ps_sigcoderet = pr->ps_sigcode + (sigcoderet - sigcode);

	return (0);
}

int
exec_timekeep_map(struct process *pr)
{
	size_t timekeep_sz = round_page(sizeof(struct timekeep));

	/*
	 * Similar to the sigcode object
	 */
	if (timekeep_object == NULL) {
		vaddr_t va = 0;

		timekeep_object = uao_create(timekeep_sz, 0);
		uao_reference(timekeep_object);

		if (uvm_map(kernel_map, &va, timekeep_sz, timekeep_object,
		    0, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
		    MAP_INHERIT_SHARE, MADV_RANDOM, 0))) {
			uao_detach(timekeep_object);
			timekeep_object = NULL;
			return (ENOMEM);
		}
		if (uvm_fault_wire(kernel_map, va, va + timekeep_sz,
		    PROT_READ | PROT_WRITE)) {
			uvm_unmap(kernel_map, va, va + timekeep_sz);
			uao_detach(timekeep_object);
			timekeep_object = NULL;
			return (ENOMEM);
		}

		timekeep = (struct timekeep *)va;
		timekeep->tk_version = TK_VERSION;
	}

	pr->ps_timekeep = 0; /* no hint */
	uao_reference(timekeep_object);
	if (uvm_map(&pr->ps_vmspace->vm_map, &pr->ps_timekeep, timekeep_sz,
	    timekeep_object, 0, 0, UVM_MAPFLAG(PROT_READ, PROT_READ,
	    MAP_INHERIT_COPY, MADV_RANDOM, 0))) {
		uao_detach(timekeep_object);
		return (ENOMEM);
	}

	return (0);
}