/* $OpenBSD: kern_exec.c,v 1.260 2025/01/25 19:21:40 claudio Exp $ */
/* $NetBSD: kern_exec.c,v 1.75 1996/02/09 18:59:28 christos Exp $ */

/*-
 * Copyright (C) 1993, 1994 Christopher G. Demetriou
 * Copyright (C) 1992 Wolfgang Solfrank.
 * Copyright (C) 1992 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/acct.h>
#include <sys/exec.h>
#include <sys/exec_elf.h>
#include <sys/ktrace.h>
#include <sys/resourcevar.h>
#include <sys/mman.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/pledge.h>
#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>
#include <machine/tcb.h>

#include <sys/timetc.h>

struct uvm_object *sigobject;		/* shared sigcode object */
vaddr_t sigcode_va;
vsize_t sigcode_sz;
struct uvm_object *timekeep_object;
struct timekeep *timekeep;

void	unveil_destroy(struct process *ps);

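/*
 * Allocation mode for the NCARGS argument buffer: pageable memory taken
 * from exec_map, with the caller willing to wait for space.
 */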
const struct kmem_va_mode kv_exec = {
	.kv_wait = 1,
	.kv_map = &exec_map
};

/*
 * Map the shared signal code.
 */
int exec_sigcode_map(struct process *);

/*
 * Map the shared timekeep page.
 */
int exec_timekeep_map(struct process *);

/*
 * If non-zero, stackgap_random specifies the upper limit of the random gap
 * size added to the fixed stack position.  Must be a power of two.
 */
int stackgap_random = STACKGAP_RANDOM;

/*
 * check exec:
 * given an "executable" described in the exec package's namei info,
 * see what we can do with it.
 *
 * ON ENTRY:
 *	exec package with appropriate namei info
 *	proc pointer of exec'ing proc
 *	NO SELF-LOCKED VNODES
 *
 * ON EXIT:
 *	error:	nothing held, etc.  exec header still allocated.
 *	ok:	filled exec package, one locked vnode.
 *
 * EXEC SWITCH ENTRY:
 *	Locked vnode to check, exec package, proc.
 *
 * EXEC SWITCH EXIT:
 *	ok:	return 0, filled exec package, one locked vnode.
 *	error:	destructive:
 *			everything deallocated except exec header.
 *		non-destructive:
 *			error code, locked vnode, exec header unmodified
 */
int
check_exec(struct proc *p, struct exec_package *epp)
{
	int error, i;
	struct vnode *vp;
	struct nameidata *ndp;
	size_t resid;

	ndp = epp->ep_ndp;
	ndp->ni_cnd.cn_nameiop = LOOKUP;
	ndp->ni_cnd.cn_flags = FOLLOW | LOCKLEAF | SAVENAME;
	if (epp->ep_flags & EXEC_INDIR)
		ndp->ni_cnd.cn_flags |= BYPASSUNVEIL;
	/* first get the vnode */
	if ((error = namei(ndp)) != 0)
		return (error);
	epp->ep_vp = vp = ndp->ni_vp;

	/* check for regular file */
	if (vp->v_type != VREG) {
		error = EACCES;
		goto bad1;
	}

	/* get attributes */
	if ((error = VOP_GETATTR(vp, epp->ep_vap, p->p_ucred, p)) != 0)
		goto bad1;

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto bad1;
	}

	/* SUID programs may not be started with execpromises */
	if ((epp->ep_vap->va_mode & (VSUID | VSGID)) &&
	    (p->p_p->ps_flags & PS_EXECPLEDGE)) {
		error = EACCES;
		goto bad1;
	}

	if ((vp->v_mount->mnt_flag & MNT_NOSUID))
		epp->ep_vap->va_mode &= ~(VSUID | VSGID);

	/* check access.  for root we have to see if any exec bit on */
	if ((error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p)) != 0)
		goto bad1;
	if ((epp->ep_vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
		error = EACCES;
		goto bad1;
	}

	/* try to open it */
	if ((error = VOP_OPEN(vp, FREAD, p->p_ucred, p)) != 0)
		goto bad1;

	/* unlock vp, we need it unlocked from here */
	VOP_UNLOCK(vp);

	/* now we have the file, get the exec header */
	error = vn_rdwr(UIO_READ, vp, epp->ep_hdr, epp->ep_hdrlen, 0,
	    UIO_SYSSPACE, 0, p->p_ucred, &resid, p);
	if (error)
		goto bad2;
	epp->ep_hdrvalid = epp->ep_hdrlen - resid;

	/*
	 * set up the vmcmds for creation of the process
	 * address space
	 */
	error = ENOEXEC;
	for (i = 0; i < nexecs && error != 0; i++) {
		int newerror;

		if (execsw[i].es_check == NULL)
			continue;
		newerror = (*execsw[i].es_check)(p, epp);
		/* make sure the first "interesting" error code is saved. */
		if (!newerror || error == ENOEXEC)
			error = newerror;
		if (epp->ep_flags & EXEC_DESTR && error != 0)
			return (error);
	}
	if (!error) {
		/* check that entry point is sane */
		if (epp->ep_entry > VM_MAXUSER_ADDRESS) {
			error = ENOEXEC;
		}

		/* check limits */
		if ((epp->ep_tsize > MAXTSIZ) ||
		    (epp->ep_dsize > lim_cur(RLIMIT_DATA)))
			error = ENOMEM;

		if (!error)
			return (0);
	}

	/*
	 * free any vmspace-creation commands,
	 * and release their references
	 */
	kill_vmcmds(&epp->ep_vmcmds);

bad2:
	/*
	 * close the vnode, free the pathname buf, and punt.
	 */
	vn_close(vp, FREAD, p->p_ucred, p);
	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
	return (error);

bad1:
	/*
	 * free the namei pathname buffer, and put the vnode
	 * (which we don't yet have open).
	 */
	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
	vput(vp);
	return (error);
}

/*
 * exec system call
 */
int
sys_execve(struct proc *p, void *v, register_t *retval)
{
	struct sys_execve_args /* {
		syscallarg(const char *) path;
		syscallarg(char *const *) argp;
		syscallarg(char *const *) envp;
	} */ *uap = v;
	int error;
	struct exec_package pack;
	struct nameidata nid;
	struct vattr attr;
	struct ucred *cred = p->p_ucred;
	char *argp;
	char * const *cpp, *dp, *sp;
#ifdef KTRACE
	char *env_start;
#endif
	struct process *pr = p->p_p;
	long argc, envc;
	size_t len, sgap, dstsize;
#ifdef MACHINE_STACK_GROWS_UP
	size_t slen;
#endif
	char *stack;
	struct ps_strings arginfo;
	struct vmspace *vm = p->p_vmspace;
	struct vnode *otvp;

	/*
	 * Get other threads to stop, if contested return ERESTART,
	 * so the syscall is restarted after halting in userret.
	 */
	if (single_thread_set(p, SINGLE_UNWIND | SINGLE_DEEP))
		return (ERESTART);

	/*
	 * Cheap solution to complicated problems.
	 * Mark this process as "leave me alone, I'm execing".
	 */
	atomic_setbits_int(&pr->ps_flags, PS_INEXEC);

	NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
	nid.ni_pledge = PLEDGE_EXEC;
	nid.ni_unveil = UNVEIL_EXEC;

	/*
	 * initialize the fields of the exec package.
	 */
	pack.ep_name = (char *)SCARG(uap, path);
	pack.ep_hdr = malloc(exec_maxhdrsz, M_EXEC, M_WAITOK);
	pack.ep_hdrlen = exec_maxhdrsz;
	pack.ep_hdrvalid = 0;
	pack.ep_ndp = &nid;
	pack.ep_interp = NULL;
	pack.ep_args = NULL;
	pack.ep_auxinfo = NULL;
	VMCMDSET_INIT(&pack.ep_vmcmds);
	pack.ep_vap = &attr;
	pack.ep_flags = 0;
	pack.ep_pins = NULL;
	pack.ep_npins = 0;

	/* see if we can run it. */
	if ((error = check_exec(p, &pack)) != 0) {
		goto freehdr;
	}

	/* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */

	/* allocate an argument buffer */
	argp = km_alloc(NCARGS, &kv_exec, &kp_pageable, &kd_waitok);
#ifdef DIAGNOSTIC
	if (argp == NULL)
		panic("execve: argp == NULL");
#endif
	dp = argp;
	argc = 0;

	/*
	 * Copy the fake args list, if there's one, freeing it as we go.
	 * exec_script_makecmds() allocates either 2 or 3 fake args bounded
	 * by MAXINTERP + MAXPATHLEN < NCARGS so no overflow can happen.
	 */
	if (pack.ep_flags & EXEC_HASARGL) {
		dstsize = NCARGS;
		for (; pack.ep_fa[argc] != NULL; argc++) {
			len = strlcpy(dp, pack.ep_fa[argc], dstsize);
			len++;
			dp += len; dstsize -= len;
			if (pack.ep_fa[argc+1] != NULL)
				free(pack.ep_fa[argc], M_EXEC, len);
			else
				free(pack.ep_fa[argc], M_EXEC, MAXPATHLEN);
		}
		free(pack.ep_fa, M_EXEC, 4 * sizeof(char *));
		pack.ep_flags &= ~EXEC_HASARGL;
	}

	/* Now get argv & environment */
	if (!(cpp = SCARG(uap, argp))) {
		error = EFAULT;
		goto bad;
	}

	if (pack.ep_flags & EXEC_SKIPARG)
		cpp++;

	while (1) {
		len = argp + ARG_MAX - dp;
		if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
			goto bad;
		if (!sp)
			break;
		if ((error = copyinstr(sp, dp, len, &len)) != 0) {
			if (error == ENAMETOOLONG)
				error = E2BIG;
			goto bad;
		}
		dp += len;
		cpp++;
		argc++;
	}

	/* must have at least one argument */
	if (argc == 0) {
		error = EINVAL;
		goto bad;
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_EXECARGS))
		ktrexec(p, KTR_EXECARGS, argp, dp - argp);
#endif

	envc = 0;
	/* environment does not need to be there */
	if ((cpp = SCARG(uap, envp)) != NULL ) {
#ifdef KTRACE
		env_start = dp;
#endif
		while (1) {
			len = argp + ARG_MAX - dp;
			if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
				goto bad;
			if (!sp)
				break;
			if ((error = copyinstr(sp, dp, len, &len)) != 0) {
				if (error == ENAMETOOLONG)
					error = E2BIG;
				goto bad;
			}
			dp += len;
			cpp++;
			envc++;
		}

#ifdef KTRACE
		if (KTRPOINT(p, KTR_EXECENV))
			ktrexec(p, KTR_EXECENV, env_start, dp - env_start);
#endif
	}

	dp = (char *)(((long)dp + _STACKALIGNBYTES) & ~_STACKALIGNBYTES);

	/*
	 * If we have enabled random stackgap, the stack itself has already
	 * been moved from a random location, but is still aligned to a page
	 * boundary.  Provide the lower bits of random placement now.
	 */
	if (stackgap_random == 0) {
		sgap = 0;
	} else {
		sgap = arc4random() & PAGE_MASK;
		sgap = (sgap + _STACKALIGNBYTES) & ~_STACKALIGNBYTES;
	}

	/* Now check if args & environ fit into new stack */
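	/*
	 * Stack image being sized here: argc (one long), the argv[] and
	 * envp[] pointer arrays with their NULL terminators, room for the
	 * ELF auxiliary vector, the copied strings, the random gap, and
	 * the ps_strings block.
	 */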
	len = ((argc + envc + 2 + ELF_AUX_WORDS) * sizeof(char *) +
	    sizeof(long) + dp + sgap + sizeof(struct ps_strings)) - argp;

	len = (len + _STACKALIGNBYTES) & ~_STACKALIGNBYTES;

	if (len > pack.ep_ssize) { /* in effect, compare to initial limit */
		error = ENOMEM;
		goto bad;
	}

	/* adjust "active stack depth" for process VSZ */
	pack.ep_ssize = len;	/* maybe should go elsewhere, but... */

	/*
	 * we're committed: any further errors will kill the process, so
	 * kill the other threads now.
	 */
	single_thread_set(p, SINGLE_EXIT);

	/*
	 * Prepare vmspace for remapping. Note that uvmspace_exec can replace
	 * ps_vmspace!
	 */
	uvmspace_exec(p, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);

	vm = pr->ps_vmspace;
	/* Now map address space */
	vm->vm_taddr = (char *)trunc_page(pack.ep_taddr);
	vm->vm_tsize = atop(round_page(pack.ep_taddr + pack.ep_tsize) -
	    trunc_page(pack.ep_taddr));
	vm->vm_daddr = (char *)trunc_page(pack.ep_daddr);
	vm->vm_dsize = atop(round_page(pack.ep_daddr + pack.ep_dsize) -
	    trunc_page(pack.ep_daddr));
	vm->vm_dused = 0;
	vm->vm_ssize = atop(round_page(pack.ep_ssize));
	vm->vm_maxsaddr = (char *)pack.ep_maxsaddr;
	vm->vm_minsaddr = (char *)pack.ep_minsaddr;

	/* create the new process's VM space by running the vmcmds */
#ifdef DIAGNOSTIC
	if (pack.ep_vmcmds.evs_used == 0)
		panic("execve: no vmcmds");
#endif
	error = exec_process_vmcmds(p, &pack);

	/* if an error happened, deallocate and punt */
	if (error)
		goto exec_abort;

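	/*
	 * ps_strings is placed at the stack limit, separated from it by the
	 * random gap; the pages covered only by that gap are mapped
	 * PROT_NONE so they can never be used.
	 */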
#ifdef MACHINE_STACK_GROWS_UP
	pr->ps_strings = (vaddr_t)vm->vm_maxsaddr + sgap;
	if (uvm_map_protect(&vm->vm_map, (vaddr_t)vm->vm_maxsaddr,
	    trunc_page(pr->ps_strings), PROT_NONE, 0, TRUE, FALSE))
		goto exec_abort;
#else
	pr->ps_strings = (vaddr_t)vm->vm_minsaddr - sizeof(arginfo) - sgap;
	if (uvm_map_protect(&vm->vm_map,
	    round_page(pr->ps_strings + sizeof(arginfo)),
	    (vaddr_t)vm->vm_minsaddr, PROT_NONE, 0, TRUE, FALSE))
		goto exec_abort;
#endif

	memset(&arginfo, 0, sizeof(arginfo));

	/* remember information about the process */
	arginfo.ps_nargvstr = argc;
	arginfo.ps_nenvstr = envc;

#ifdef MACHINE_STACK_GROWS_UP
	stack = (char *)vm->vm_maxsaddr + sizeof(arginfo) + sgap;
	slen = len - sizeof(arginfo) - sgap;
#else
	stack = (char *)(vm->vm_minsaddr - len);
#endif
	/* Now copy argc, args & environ to new stack */
	if (!copyargs(&pack, &arginfo, stack, argp))
		goto exec_abort;

	pr->ps_auxinfo = (vaddr_t)pack.ep_auxinfo;

	/* copy out the process's ps_strings structure */
	if (copyout(&arginfo, (char *)pr->ps_strings, sizeof(arginfo)))
		goto exec_abort;

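	/*
	 * Discard the pinsyscall table inherited from the old image and
	 * install the one recorded for the new executable, if any; the
	 * libc pin table is dropped as well.
	 */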
	free(pr->ps_pin.pn_pins, M_PINSYSCALL,
	    pr->ps_pin.pn_npins * sizeof(u_int));
	if (pack.ep_npins) {
		pr->ps_pin.pn_start = pack.ep_pinstart;
		pr->ps_pin.pn_end = pack.ep_pinend;
		pr->ps_pin.pn_pins = pack.ep_pins;
		pack.ep_pins = NULL;
		pr->ps_pin.pn_npins = pack.ep_npins;
	} else {
		pr->ps_pin.pn_start = pr->ps_pin.pn_end = 0;
		pr->ps_pin.pn_pins = NULL;
		pr->ps_pin.pn_npins = 0;
	}
	if (pr->ps_libcpin.pn_pins) {
		free(pr->ps_libcpin.pn_pins, M_PINSYSCALL,
		    pr->ps_libcpin.pn_npins * sizeof(u_int));
		pr->ps_libcpin.pn_start = pr->ps_libcpin.pn_end = 0;
		pr->ps_libcpin.pn_pins = NULL;
		pr->ps_libcpin.pn_npins = 0;
	}

	stopprofclock(pr);	/* stop profiling */
	fdcloseexec(p);		/* handle close on exec */
	execsigs(p);		/* reset caught signals */
	TCB_SET(p, NULL);	/* reset the TCB address */
	pr->ps_kbind_addr = 0;	/* reset the kbind bits */
	pr->ps_kbind_cookie = 0;
	arc4random_buf(&pr->ps_sigcookie, sizeof pr->ps_sigcookie);

	/* set command name & other accounting info */
	memset(pr->ps_comm, 0, sizeof(pr->ps_comm));
	strlcpy(pr->ps_comm, nid.ni_cnd.cn_nameptr, sizeof(pr->ps_comm));
	pr->ps_acflag &= ~AFORK;

	/* record proc's vnode, for use by sysctl */
	otvp = pr->ps_textvp;
	vref(pack.ep_vp);
	pr->ps_textvp = pack.ep_vp;
	if (otvp)
		vrele(otvp);

	if (pack.ep_flags & EXEC_NOBTCFI)
		atomic_setbits_int(&p->p_p->ps_flags, PS_NOBTCFI);
	else
		atomic_clearbits_int(&p->p_p->ps_flags, PS_NOBTCFI);

	atomic_setbits_int(&pr->ps_flags, PS_EXEC);
	if (pr->ps_flags & PS_PPWAIT) {
		atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
		atomic_clearbits_int(&pr->ps_pptr->ps_flags, PS_ISPWAIT);
		wakeup(pr->ps_pptr);
	}

	/*
	 * If process does execve() while it has a mismatched real,
	 * effective, or saved uid/gid, we set PS_SUGIDEXEC.
	 */
	if (cred->cr_uid != cred->cr_ruid ||
	    cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_rgid ||
	    cred->cr_gid != cred->cr_svgid)
		atomic_setbits_int(&pr->ps_flags, PS_SUGIDEXEC);
	else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGIDEXEC);

	if (pr->ps_flags & PS_EXECPLEDGE) {
		pr->ps_pledge = pr->ps_execpledge;
		atomic_setbits_int(&pr->ps_flags, PS_PLEDGE);
	} else {
		atomic_clearbits_int(&pr->ps_flags, PS_PLEDGE);
		pr->ps_pledge = 0;
		/* XXX XXX XXX XXX */
		/* Clear our unveil paths out so the child
		 * starts afresh
		 */
		unveil_destroy(pr);
		pr->ps_uvdone = 0;
	}

	/*
	 * deal with set[ug]id.
	 * MNT_NOEXEC has already been used to disable s[ug]id.
	 */
	if ((attr.va_mode & (VSUID | VSGID)) && proc_cansugid(p)) {
		int i;

		atomic_setbits_int(&pr->ps_flags, PS_SUGID|PS_SUGIDEXEC);

#ifdef KTRACE
		/*
		 * If process is being ktraced, turn off - unless
		 * root set it.
		 */
		if (pr->ps_tracevp && !(pr->ps_traceflag & KTRFAC_ROOT))
			ktrcleartrace(pr);
#endif
		p->p_ucred = cred = crcopy(cred);
		if (attr.va_mode & VSUID)
			cred->cr_uid = attr.va_uid;
		if (attr.va_mode & VSGID)
			cred->cr_gid = attr.va_gid;

		/*
		 * For set[ug]id processes, a few caveats apply to
		 * stdin, stdout, and stderr.
		 */
		error = 0;
		fdplock(p->p_fd);
		for (i = 0; i < 3; i++) {
			struct file *fp = NULL;

			/*
			 * NOTE - This will never return NULL because of
			 * immature fds. The file descriptor table is not
			 * shared because we're suid.
			 */
			fp = fd_getfile(p->p_fd, i);

			/*
			 * Ensure that stdin, stdout, and stderr are already
			 * allocated.  We do not want userland to accidentally
			 * allocate descriptors in this range which has implied
			 * meaning to libc.
			 */
			if (fp == NULL) {
				short flags = FREAD | (i == 0 ? 0 : FWRITE);
				struct vnode *vp;
				int indx;

				if ((error = falloc(p, &fp, &indx)) != 0)
					break;
#ifdef DIAGNOSTIC
				if (indx != i)
					panic("sys_execve: falloc indx != i");
#endif
				if ((error = cdevvp(getnulldev(), &vp)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					break;
				}
				if ((error = VOP_OPEN(vp, flags, cred, p)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					vrele(vp);
					break;
				}
				if (flags & FWRITE)
					vp->v_writecount++;
				fp->f_flag = flags;
				fp->f_type = DTYPE_VNODE;
				fp->f_ops = &vnops;
				fp->f_data = (caddr_t)vp;
				fdinsert(p->p_fd, indx, 0, fp);
			}
			FRELE(fp, p);
		}
		fdpunlock(p->p_fd);
		if (error)
			goto exec_abort;
	} else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGID);

	/*
	 * Reset the saved ugids and update the process's copy of the
	 * creds if the creds have been changed
	 */
	if (cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_svgid) {
		/* make sure we have unshared ucreds */
		p->p_ucred = cred = crcopy(cred);
		cred->cr_svuid = cred->cr_uid;
		cred->cr_svgid = cred->cr_gid;
	}

	if (pr->ps_ucred != cred) {
		struct ucred *ocred;

		ocred = pr->ps_ucred;
		crhold(cred);
		pr->ps_ucred = cred;
		crfree(ocred);
	}

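	/*
	 * On a set-id exec, cancel any interval timers armed by the old
	 * image so their signals cannot be delivered into the new one.
	 */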
	if (pr->ps_flags & PS_SUGIDEXEC) {
		cancel_all_itimers();
	}

	/* reset CPU time usage for the thread, but not the process */
	timespecclear(&p->p_tu.tu_runtime);
	p->p_tu.tu_uticks = p->p_tu.tu_sticks = p->p_tu.tu_iticks = 0;
	p->p_tu.tu_gen = 0;

	memset(p->p_name, 0, sizeof p->p_name);

	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);

	/*
	 * notify others that we exec'd
	 */
	knote(&pr->ps_klist, NOTE_EXEC);

	/* map the process's timekeep page, needs to be before exec_elf_fixup */
	if (exec_timekeep_map(pr))
		goto free_pack_abort;

	/* setup new registers and do misc. setup. */
	if (exec_elf_fixup(p, &pack) != 0)
		goto free_pack_abort;
#ifdef MACHINE_STACK_GROWS_UP
	setregs(p, &pack, (u_long)stack + slen, &arginfo);
#else
	setregs(p, &pack, (u_long)stack, &arginfo);
#endif

	/* map the process's signal trampoline code */
	if (exec_sigcode_map(pr))
		goto free_pack_abort;

#ifdef __HAVE_EXEC_MD_MAP
	/* perform md specific mappings that process might need */
	if (exec_md_map(p, &pack))
		goto free_pack_abort;
#endif

	if (pr->ps_flags & PS_TRACED)
		psignal(p, SIGTRAP);

	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);

	p->p_descfd = 255;
	if ((pack.ep_flags & EXEC_HASFD) && pack.ep_fd < 255)
		p->p_descfd = pack.ep_fd;

	if (pack.ep_flags & EXEC_WXNEEDED)
		atomic_setbits_int(&p->p_p->ps_flags, PS_WXNEEDED);
	else
		atomic_clearbits_int(&p->p_p->ps_flags, PS_WXNEEDED);

	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p);

	/* setregs() sets up all the registers, so just 'return' */
	return EJUSTRETURN;

bad:
	/* free the vmspace-creation commands, and release their references */
	kill_vmcmds(&pack.ep_vmcmds);
	/* kill any opened file descriptor, if necessary */
	if (pack.ep_flags & EXEC_HASFD) {
		pack.ep_flags &= ~EXEC_HASFD;
		fdplock(p->p_fd);
		/* fdrelease unlocks p->p_fd. */
		(void) fdrelease(p, pack.ep_fd);
	}
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	free(pack.ep_args, M_TEMP, sizeof *pack.ep_args);
	free(pack.ep_pins, M_PINSYSCALL, pack.ep_npins * sizeof(u_int));
	/* close and put the exec'd file */
	vn_close(pack.ep_vp, FREAD, cred, p);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

freehdr:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p);

	return (error);

exec_abort:
	/*
	 * the old process doesn't exist anymore.  exit gracefully.
	 * get rid of the (new) address space we have created, if any, get rid
	 * of our namei data and vnode, and exit noting failure
	 */
	uvm_unmap(&vm->vm_map, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	free(pack.ep_args, M_TEMP, sizeof *pack.ep_args);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

free_pack_abort:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
	exit1(p, 0, SIGABRT, EXIT_NORMAL);
	/* NOTREACHED */
}


int
copyargs(struct exec_package *pack, struct ps_strings *arginfo, void *stack,
    void *argp)
{
	char **cpp = stack;
	char *dp, *sp;
	size_t len;
	void *nullp = NULL;
	long argc = arginfo->ps_nargvstr;
	int envc = arginfo->ps_nenvstr;

	if (copyout(&argc, cpp++, sizeof(argc)))
		return (0);

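	/*
	 * The strings are copied out just past the argv/envp pointer
	 * arrays, their NULL terminators, and the slots reserved for the
	 * ELF auxiliary vector.
	 */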
	dp = (char *) (cpp + argc + envc + 2 + ELF_AUX_WORDS);
	sp = argp;

	/* XXX don't copy them out, remap them! */
	arginfo->ps_argvstr = cpp; /* remember location of argv for later */

	for (; --argc >= 0; sp += len, dp += len)
		if (copyout(&dp, cpp++, sizeof(dp)) ||
		    copyoutstr(sp, dp, ARG_MAX, &len))
			return (0);

	if (copyout(&nullp, cpp++, sizeof(nullp)))
		return (0);

	arginfo->ps_envstr = cpp; /* remember location of envp for later */

	for (; --envc >= 0; sp += len, dp += len)
		if (copyout(&dp, cpp++, sizeof(dp)) ||
		    copyoutstr(sp, dp, ARG_MAX, &len))
			return (0);

	if (copyout(&nullp, cpp++, sizeof(nullp)))
		return (0);

	/* if this process needs auxinfo, note where to place it */
	if (pack->ep_args != NULL)
		pack->ep_auxinfo = cpp;

	return (1);
}

int
exec_sigcode_map(struct process *pr)
{
	extern char sigcode[], esigcode[], sigcoderet[];
	vsize_t sz;

	sz = (vaddr_t)esigcode - (vaddr_t)sigcode;

	/*
	 * If we don't have a sigobject yet, create one.
	 *
	 * sigobject is an anonymous memory object (just like SYSV shared
	 * memory) that we keep a permanent reference to and that we map
	 * in all processes that need this sigcode. The creation is simple,
	 * we create an object, add a permanent reference to it, map it in
	 * kernel space, copy out the sigcode to it and unmap it. Then we map
	 * it with PROT_EXEC into the process just the way sys_mmap would map it.
	 */
	if (sigobject == NULL) {
		extern int sigfillsiz;
		extern u_char sigfill[];
		size_t off, left;
		vaddr_t va;
		int r;

		sigobject = uao_create(sz, 0);
		uao_reference(sigobject);	/* permanent reference */

		if ((r = uvm_map(kernel_map, &va, round_page(sz), sigobject,
		    0, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
		    MAP_INHERIT_SHARE, MADV_RANDOM, 0)))) {
			uao_detach(sigobject);
			return (ENOMEM);
		}

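		/*
		 * Fill the whole object with the MD sigfill pattern first,
		 * then copy the real sigcode over the start of it.
		 */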
		for (off = 0, left = round_page(sz); left != 0;
		    off += sigfillsiz) {
			size_t chunk = ulmin(left, sigfillsiz);
			memcpy((caddr_t)va + off, sigfill, chunk);
			left -= chunk;
		}
		memcpy((caddr_t)va, sigcode, sz);

		(void) uvm_map_protect(kernel_map, va, round_page(sz),
		    PROT_READ, 0, FALSE, FALSE);
		sigcode_va = va;
		sigcode_sz = round_page(sz);
	}

	pr->ps_sigcode = 0; /* no hint */
	uao_reference(sigobject);
	if (uvm_map(&pr->ps_vmspace->vm_map, &pr->ps_sigcode, round_page(sz),
	    sigobject, 0, 0, UVM_MAPFLAG(PROT_EXEC,
	    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_INHERIT_COPY,
	    MADV_RANDOM, UVM_FLAG_COPYONW))) {
		uao_detach(sigobject);
		return (ENOMEM);
	}
	uvm_map_immutable(&pr->ps_vmspace->vm_map, pr->ps_sigcode,
	    pr->ps_sigcode + round_page(sz), 1);

	/* Calculate PC at point of sigreturn entry */
	pr->ps_sigcoderet = pr->ps_sigcode + (sigcoderet - sigcode);

	return (0);
}

int
exec_timekeep_map(struct process *pr)
{
	size_t timekeep_sz = round_page(sizeof(struct timekeep));

	/*
	 * Similar to the sigcode object
	 */
	if (timekeep_object == NULL) {
		vaddr_t va = 0;

		timekeep_object = uao_create(timekeep_sz, 0);
		uao_reference(timekeep_object);

		if (uvm_map(kernel_map, &va, timekeep_sz, timekeep_object,
		    0, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
		    MAP_INHERIT_SHARE, MADV_RANDOM, 0))) {
			uao_detach(timekeep_object);
			timekeep_object = NULL;
			return (ENOMEM);
		}
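		/*
		 * Wire the kernel mapping so the timecounter code can
		 * update the shared page without taking a fault.
		 */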
		if (uvm_fault_wire(kernel_map, va, va + timekeep_sz,
		    PROT_READ | PROT_WRITE)) {
			uvm_unmap(kernel_map, va, va + timekeep_sz);
			uao_detach(timekeep_object);
			timekeep_object = NULL;
			return (ENOMEM);
		}

		timekeep = (struct timekeep *)va;
		timekeep->tk_version = TK_VERSION;
	}

	pr->ps_timekeep = 0; /* no hint */
	uao_reference(timekeep_object);
	if (uvm_map(&pr->ps_vmspace->vm_map, &pr->ps_timekeep, timekeep_sz,
	    timekeep_object, 0, 0, UVM_MAPFLAG(PROT_READ, PROT_READ,
	    MAP_INHERIT_COPY, MADV_RANDOM, 0))) {
		uao_detach(timekeep_object);
		return (ENOMEM);
	}
	uvm_map_immutable(&pr->ps_vmspace->vm_map, pr->ps_timekeep,
	    pr->ps_timekeep + timekeep_sz, 1);

	return (0);
}