/*	$OpenBSD: exec_subr.c,v 1.67 2024/04/02 08:39:16 deraadt Exp $	*/
/*	$NetBSD: exec_subr.c,v 1.9 1994/12/04 03:10:42 mycroft Exp $	*/

/*
 * Copyright (c) 1993, 1994 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/exec.h>
#include <sys/mman.h>
#include <sys/resourcevar.h>

#include <uvm/uvm_extern.h>

#ifdef DEBUG
/*
 * new_vmcmd():
 *	create a new vmcmd structure and fill in its fields based
 *	on function call arguments.  make sure objects ref'd by
 *	the vmcmd are 'held'.
 *
 * If not debugging, this is a macro, so it's expanded inline.
 */

void
new_vmcmd(struct exec_vmcmd_set *evsp,
    int (*proc)(struct proc *, struct exec_vmcmd *), u_long len, u_long addr,
    struct vnode *vp, u_long offset, u_int prot, int flags)
{
	struct exec_vmcmd    *vcp;

	if (evsp->evs_used >= evsp->evs_cnt)
		vmcmdset_extend(evsp);
	vcp = &evsp->evs_cmds[evsp->evs_used++];
	vcp->ev_proc = proc;
	vcp->ev_len = len;
	vcp->ev_addr = addr;
	if ((vcp->ev_vp = vp) != NULL)
		vref(vp);
	vcp->ev_offset = offset;
	vcp->ev_prot = prot;
	vcp->ev_flags = flags;
}
#endif /* DEBUG */
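
/*
 * Usage sketch (comment only, not compiled): executable-format
 * loaders queue vmcmds through the NEW_VMCMD()/NEW_VMCMD2()
 * wrappers from <sys/exec.h> rather than calling new_vmcmd()
 * directly.  The values below are hypothetical, assuming an ELF
 * program header "ph" and the executable's vnode "vp":
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, ph->p_filesz,
 *	    ph->p_vaddr, vp, ph->p_offset, PROT_READ | PROT_EXEC);
 */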

void
vmcmdset_extend(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *nvcp;
	u_int ocnt;

#ifdef DIAGNOSTIC
	if (evsp->evs_used < evsp->evs_cnt)
		panic("vmcmdset_extend: not necessary");
#endif

	ocnt = evsp->evs_cnt;
	KASSERT(ocnt > 0);
	/* figure out number of entries in new set */
	evsp->evs_cnt += ocnt;

	/* reallocate the command set */
	nvcp = mallocarray(evsp->evs_cnt, sizeof(*nvcp), M_EXEC,
	    M_WAITOK);
	memcpy(nvcp, evsp->evs_cmds, ocnt * sizeof(*nvcp));
	if (evsp->evs_cmds != evsp->evs_start)
		free(evsp->evs_cmds, M_EXEC, ocnt * sizeof(*nvcp));
	evsp->evs_cmds = nvcp;
}
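
/*
 * Growth sketch (comment only): each call doubles evs_cnt, so a set
 * that starts at EXEC_DEFAULT_VMCMD_SETSIZE grows geometrically.
 * The embedded evs_start array is never passed to free(); only
 * malloc'd replacements are.  Assuming a default size of N:
 *
 *	N entries -> 2N -> 4N -> ...
 */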

void
kill_vmcmds(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *vcp;
	int i;

	for (i = 0; i < evsp->evs_used; i++) {
		vcp = &evsp->evs_cmds[i];
		if (vcp->ev_vp != NULLVP)
			vrele(vcp->ev_vp);
	}

	/*
	 * Free old vmcmds and reset the array.
	 */
	evsp->evs_used = 0;
	if (evsp->evs_cmds != evsp->evs_start)
		free(evsp->evs_cmds, M_EXEC,
		    evsp->evs_cnt * sizeof(struct exec_vmcmd));
	evsp->evs_cmds = evsp->evs_start;
	evsp->evs_cnt = EXEC_DEFAULT_VMCMD_SETSIZE;
}

int
exec_process_vmcmds(struct proc *p, struct exec_package *epp)
{
	struct exec_vmcmd *base_vc = NULL;
	int error = 0;
	int i;

	for (i = 0; i < epp->ep_vmcmds.evs_used && !error; i++) {
		struct exec_vmcmd *vcp;

		vcp = &epp->ep_vmcmds.evs_cmds[i];

		if (vcp->ev_flags & VMCMD_RELATIVE) {
#ifdef DIAGNOSTIC
			if (base_vc == NULL)
				panic("exec_process_vmcmds: RELATIVE no base");
#endif
			vcp->ev_addr += base_vc->ev_addr;
		}
		error = (*vcp->ev_proc)(p, vcp);
		if (vcp->ev_flags & VMCMD_BASE) {
			base_vc = vcp;
		}
	}

	kill_vmcmds(&epp->ep_vmcmds);

	return (error);
}
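
/*
 * Base/relative sketch (comment only, hypothetical values): a loader
 * may mark one vmcmd VMCMD_BASE and queue later VMCMD_RELATIVE
 * commands whose ev_addr is an offset from it.  During processing
 * above, the relative command's address is rewritten in place:
 *
 *	base:     ev_addr = 0x1000, ev_flags = VMCMD_BASE
 *	relative: ev_addr = 0x200,  ev_flags = VMCMD_RELATIVE
 *	          -> runs with ev_addr = 0x1000 + 0x200 = 0x1200
 */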

/*
 * vmcmd_map_pagedvn():
 *	handle vmcmd which specifies that a vnode should be mmap'd.
 *	appropriate for handling demand-paged text and data segments.
 */

int
vmcmd_map_pagedvn(struct proc *p, struct exec_vmcmd *cmd)
{
	/*
	 * note that if you're going to map part of a process as being
	 * paged from a vnode, that vnode had damn well better be marked as
	 * VTEXT.  that's handled in the routine which sets up the vmcmd to
	 * call this routine.
	 */
	struct uvm_object *uobj;
	unsigned int flags = UVM_FLAG_COPYONW | UVM_FLAG_FIXED;
	int error;

	/*
	 * map the vnode in using uvm_map.
	 */

	if (cmd->ev_len == 0)
		return (0);
	if (cmd->ev_offset & PAGE_MASK)
		return (EINVAL);
	if (cmd->ev_addr & PAGE_MASK)
		return (EINVAL);
	if (cmd->ev_len & PAGE_MASK)
		return (EINVAL);

	/*
	 * first, attach to the object
	 */

	uobj = uvn_attach(cmd->ev_vp, PROT_READ | PROT_EXEC);
	if (uobj == NULL)
		return (ENOMEM);

	/*
	 * do the map
	 */
	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
	    uobj, cmd->ev_offset, 0,
	    UVM_MAPFLAG(cmd->ev_prot, PROT_MASK, MAP_INHERIT_COPY,
	    MADV_NORMAL, flags));

	/*
	 * check for error
	 */

	if (error) {
		/*
		 * error: detach from object
		 */
		uobj->pgops->pgo_detach(uobj);
	} else {
		if (cmd->ev_flags & VMCMD_IMMUTABLE)
			uvm_map_immutable(&p->p_vmspace->vm_map, cmd->ev_addr,
			    round_page(cmd->ev_addr + cmd->ev_len), 1);
#ifdef PMAP_CHECK_COPYIN
		if (PMAP_CHECK_COPYIN &&
		    ((cmd->ev_flags & VMCMD_IMMUTABLE) && (cmd->ev_prot & PROT_EXEC)))
			uvm_map_check_copyin_add(&p->p_vmspace->vm_map,
			    cmd->ev_addr, round_page(cmd->ev_addr + cmd->ev_len));
#endif
	}

	return (error);
}
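
/*
 * Alignment sketch (comment only, hypothetical values): ev_addr,
 * ev_offset and ev_len must all be page-aligned here, so a
 * hypothetical caller mapping an ELF text segment described by a
 * program header "ph" would round its own values first, e.g.:
 *
 *	addr   = trunc_page(ph->p_vaddr);
 *	offset = trunc_page(ph->p_offset);
 *	len    = round_page(ph->p_filesz + (ph->p_vaddr - addr));
 */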

/*
 * vmcmd_map_readvn():
 *	handle vmcmd which specifies that a vnode should be read from.
 *	appropriate for non-demand-paged text/data segments, i.e. impure
 *	objects (a la OMAGIC and NMAGIC).
 */

int
vmcmd_map_readvn(struct proc *p, struct exec_vmcmd *cmd)
{
	int error;
	vm_prot_t prot;

	if (cmd->ev_len == 0)
		return (0);

	prot = cmd->ev_prot;

	KASSERT((cmd->ev_addr & PAGE_MASK) == 0);
	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
	    round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(prot | PROT_WRITE, PROT_MASK, MAP_INHERIT_COPY,
	    MADV_NORMAL, UVM_FLAG_FIXED|UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));

	if (error)
		return (error);

	error = vn_rdwr(UIO_READ, cmd->ev_vp, (caddr_t)cmd->ev_addr,
	    cmd->ev_len, cmd->ev_offset, UIO_USERSPACE, IO_UNIT,
	    p->p_ucred, NULL, p);
	if (error)
		return (error);

	if ((prot & PROT_WRITE) == 0) {
		/*
		 * we had to map in the area at PROT_WRITE so that vn_rdwr()
		 * could write to it.   however, the caller seems to want
		 * it mapped read-only, so now we are going to have to call
		 * uvm_map_protect() to fix up the protection.  ICK.
		 */
		error = (uvm_map_protect(&p->p_vmspace->vm_map,
		    cmd->ev_addr, round_page(cmd->ev_len),
		    prot, 0, FALSE, TRUE));
	}
	if (error == 0) {
		if (cmd->ev_flags & VMCMD_IMMUTABLE)
			uvm_map_immutable(&p->p_vmspace->vm_map, cmd->ev_addr,
			    round_page(cmd->ev_addr + cmd->ev_len), 1);
	}
	return (error);
}
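
/*
 * Flow sketch (comment only): unlike vmcmd_map_pagedvn(), this path
 * is map-then-copy rather than demand-paged:
 *
 *	1. uvm_map() anonymous zero-fill memory, forced writable;
 *	2. vn_rdwr() the file contents into it;
 *	3. uvm_map_protect() back down if the caller didn't ask for
 *	   PROT_WRITE.
 */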

/*
 * vmcmd_map_zero():
 *	handle vmcmd which specifies a zero-filled address space region.
 */

int
vmcmd_map_zero(struct proc *p, struct exec_vmcmd *cmd)
{
	int error;

	if (cmd->ev_len == 0)
		return (0);

	KASSERT((cmd->ev_addr & PAGE_MASK) == 0);
	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
	    round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(cmd->ev_prot, PROT_MASK, MAP_INHERIT_COPY,
	    MADV_NORMAL, UVM_FLAG_FIXED|UVM_FLAG_COPYONW |
	    (cmd->ev_flags & VMCMD_STACK ? UVM_FLAG_STACK : 0)));
	if (cmd->ev_flags & VMCMD_IMMUTABLE)
		uvm_map_immutable(&p->p_vmspace->vm_map, cmd->ev_addr,
		    round_page(cmd->ev_addr + cmd->ev_len), 1);
	return error;
}
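
/*
 * Usage sketch (comment only, hypothetical values, assuming the
 * addresses come out page-aligned): zero-fill commands typically
 * cover bss, e.g. the part of an ELF load segment past p_filesz:
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero,
 *	    ph->p_memsz - ph->p_filesz, ph->p_vaddr + ph->p_filesz,
 *	    NULLVP, 0, PROT_READ | PROT_WRITE);
 */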

/*
 * vmcmd_mutable():
 *	handle vmcmd which changes an address space region back to mutable.
 */

int
vmcmd_mutable(struct proc *p, struct exec_vmcmd *cmd)
{
	if (cmd->ev_len == 0)
		return (0);

	/* ev_addr, ev_len may be misaligned, so maximize the region */
	uvm_map_immutable(&p->p_vmspace->vm_map, trunc_page(cmd->ev_addr),
	    round_page(cmd->ev_addr + cmd->ev_len), 0);
	return 0;
}

/*
 * vmcmd_randomize():
 *	handle vmcmd which specifies a randomized address space region.
 */
#define RANDOMIZE_CTX_THRESHOLD 512
int
vmcmd_randomize(struct proc *p, struct exec_vmcmd *cmd)
{
	int error;
	struct arc4random_ctx *ctx;
	char *buf;
	size_t sublen, off = 0;
	size_t len = cmd->ev_len;

	if (len == 0)
		return (0);
	if (len > ELF_RANDOMIZE_LIMIT)
		return (EINVAL);

	buf = malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
	if (len < RANDOMIZE_CTX_THRESHOLD) {
		arc4random_buf(buf, len);
		error = copyout(buf, (void *)cmd->ev_addr, len);
		explicit_bzero(buf, len);
	} else {
		ctx = arc4random_ctx_new();
		do {
			sublen = MIN(len, PAGE_SIZE);
			arc4random_ctx_buf(ctx, buf, sublen);
			error = copyout(buf, (void *)cmd->ev_addr + off, sublen);
			if (error)
				break;
			off += sublen;
			len -= sublen;
			sched_pause(yield);
		} while (len);
		arc4random_ctx_free(ctx);
		explicit_bzero(buf, PAGE_SIZE);
	}
	free(buf, M_TEMP, PAGE_SIZE);
	return (error);
}
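
/*
 * Threshold sketch (comment only): short regions draw straight from
 * the shared arc4random pool; anything RANDOMIZE_CTX_THRESHOLD bytes
 * or larger gets a private arc4random context and is filled one
 * PAGE_SIZE chunk at a time, yielding between chunks:
 *
 *	len < 512   -> arc4random_buf(), single copyout()
 *	len >= 512  -> arc4random_ctx_buf() loop, sched_pause(yield)
 */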

#ifndef MAXSSIZ_GUARD
#define MAXSSIZ_GUARD	(1024 * 1024)
#endif

/*
 * exec_setup_stack(): Set up the stack segment for an executable.
 *
 * Note that the ep_ssize parameter must be set to be the current stack
 * limit; this is adjusted in the body of execve() to yield the
 * appropriate stack segment usage once the argument length is
 * calculated.
 *
 * This function returns an int for uniformity with other (future) formats'
 * stack setup functions.  They might have errors to return.
 */

int
exec_setup_stack(struct proc *p, struct exec_package *epp)
{
	vsize_t dist = 0;

#ifdef MACHINE_STACK_GROWS_UP
	epp->ep_maxsaddr = USRSTACK;
	epp->ep_minsaddr = USRSTACK + MAXSSIZ;
#else
	epp->ep_maxsaddr = USRSTACK - MAXSSIZ - MAXSSIZ_GUARD;
	epp->ep_minsaddr = USRSTACK;
#endif
	epp->ep_ssize = round_page(lim_cur(RLIMIT_STACK));

#ifdef VM_MIN_STACK_ADDRESS
	dist = USRSTACK - MAXSSIZ - MAXSSIZ_GUARD - VM_MIN_STACK_ADDRESS;
	if (dist >> PAGE_SHIFT > 0xffffffff)
		dist = (vsize_t)arc4random() << PAGE_SHIFT;
	else
		dist = (vsize_t)arc4random_uniform(dist >> PAGE_SHIFT) << PAGE_SHIFT;
#else
	if (stackgap_random != 0) {
		dist = arc4random() & (stackgap_random - 1);
		dist = trunc_page(dist);
	}
#endif

#ifdef MACHINE_STACK_GROWS_UP
	epp->ep_maxsaddr += dist;
	epp->ep_minsaddr += dist;
#else
	epp->ep_maxsaddr -= dist;
	epp->ep_minsaddr -= dist;
#endif

	/*
	 * set up commands for stack.  note that this takes *two*, one to
	 * map the part of the stack which we can access, and one to map
	 * the part which we can't.
	 *
	 * arguably, it could be made into one, but that would require the
	 * addition of another mapping proc, which is unnecessary.
	 *
	 * note that in memory, things are assumed to be laid out:
	 *	0 ....... ep_maxsaddr <stack> ep_minsaddr
	 */
#ifdef MACHINE_STACK_GROWS_UP
	NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero,
	    ((epp->ep_minsaddr - epp->ep_ssize) - epp->ep_maxsaddr),
	    epp->ep_maxsaddr + epp->ep_ssize,
	    NULLVP, 0, PROT_NONE, VMCMD_IMMUTABLE);
	NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, epp->ep_ssize,
	    epp->ep_maxsaddr,
	    NULLVP, 0, PROT_READ | PROT_WRITE, VMCMD_STACK | VMCMD_IMMUTABLE);
#else
	NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero,
	    ((epp->ep_minsaddr - epp->ep_ssize) - epp->ep_maxsaddr),
	    epp->ep_maxsaddr,
	    NULLVP, 0, PROT_NONE, VMCMD_IMMUTABLE);
	NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, epp->ep_ssize,
	    (epp->ep_minsaddr - epp->ep_ssize),
	    NULLVP, 0, PROT_READ | PROT_WRITE, VMCMD_STACK | VMCMD_IMMUTABLE);
#endif

	return (0);
}
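
/*
 * Layout sketch (comment only) for the usual downward-growing case:
 * with USRSTACK at the top, the first vmcmd maps the inaccessible
 * PROT_NONE reserve from ep_maxsaddr up to ep_minsaddr - ep_ssize,
 * and the second maps the usable ep_ssize bytes ending at
 * ep_minsaddr:
 *
 *	ep_maxsaddr [PROT_NONE reserve][RW stack] ep_minsaddr
 */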