xref: /openbsd/sys/kern/exec_elf.c (revision ebd6ceec)
1 /*	$OpenBSD: exec_elf.c,v 1.191 2024/09/15 23:13:19 deraadt Exp $	*/
2 
3 /*
4  * Copyright (c) 1996 Per Fogelstrom
5  * All rights reserved.
6  *
7  * Copyright (c) 1994 Christos Zoulas
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. The name of the author may not be used to endorse or promote products
19  *    derived from this software without specific prior written permission
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  *
32  */
33 
34 /*
35  * Copyright (c) 2001 Wasabi Systems, Inc.
36  * All rights reserved.
37  *
38  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 1. Redistributions of source code must retain the above copyright
44  *    notice, this list of conditions and the following disclaimer.
45  * 2. Redistributions in binary form must reproduce the above copyright
46  *    notice, this list of conditions and the following disclaimer in the
47  *    documentation and/or other materials provided with the distribution.
48  * 3. All advertising materials mentioning features or use of this software
49  *    must display the following acknowledgement:
50  *	This product includes software developed for the NetBSD Project by
51  *	Wasabi Systems, Inc.
52  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
53  *    or promote products derived from this software without specific prior
54  *    written permission.
55  *
56  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
57  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
58  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
59  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
60  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
61  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
62  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
63  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
64  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
65  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
66  * POSSIBILITY OF SUCH DAMAGE.
67  */
68 
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/proc.h>
72 #include <sys/malloc.h>
73 #include <sys/pool.h>
74 #include <sys/mount.h>
75 #include <sys/namei.h>
76 #include <sys/vnode.h>
77 #include <sys/core.h>
78 #include <sys/exec.h>
79 #include <sys/exec_elf.h>
80 #include <sys/fcntl.h>
81 #include <sys/ptrace.h>
82 #include <sys/signalvar.h>
83 #include <sys/pledge.h>
84 #include <sys/syscall.h>
85 
86 #include <sys/mman.h>
87 
88 #include <uvm/uvm_extern.h>
89 
90 #include <machine/reg.h>
91 #include <machine/exec.h>
92 #include <machine/elf.h>
93 
94 int	elf_load_file(struct proc *, char *, struct exec_package *,
95 	    struct elf_args *);
96 int	elf_check_header(Elf_Ehdr *);
97 int	elf_read_from(struct proc *, struct vnode *, u_long, void *, int);
98 void	elf_load_psection(struct exec_vmcmd_set *, struct vnode *,
99 	    Elf_Phdr *, Elf_Addr *, Elf_Addr *, int *, int);
100 int	elf_os_pt_note_name(Elf_Note *);
101 int	elf_os_pt_note(struct proc *, struct exec_package *, Elf_Ehdr *, int *);
102 int	elf_read_pintable(struct proc *p, struct vnode *vp, Elf_Phdr *pp,
103 	    u_int **pinp, int is_ldso, size_t len);
104 
105 /* round up and down to the given power-of-two boundary. */
106 #define ELF_ROUND(a, b)		(((a) + (b) - 1) & ~((b) - 1))
107 #define ELF_TRUNC(a, b)		((a) & ~((b) - 1))
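/*
 * For illustration: with a power-of-two boundary b = 0x1000,
 * ELF_ROUND(0x12345, 0x1000) == 0x13000 and
 * ELF_TRUNC(0x12345, 0x1000) == 0x12000.  Both macros assume b is a
 * power of two; the callers verify this with powerof2() on p_align.
 */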
108 
109 /*
110  * We limit the number of program headers to 32; this should be a
111  * reasonable limit for ELF, since the most we have seen so far is 12.
112  */
113 #define ELF_MAX_VALID_PHDR 32
114 
115 #define ELF_NOTE_NAME_OPENBSD	0x01
116 
117 struct elf_note_name {
118 	char *name;
119 	int id;
120 } elf_note_names[] = {
121 	{ "OpenBSD",	ELF_NOTE_NAME_OPENBSD },
122 };
123 
124 #define	ELFROUNDSIZE	sizeof(Elf_Word)
125 #define	elfround(x)	roundup((x), ELFROUNDSIZE)
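/*
 * For illustration: ELFROUNDSIZE is 4 (sizeof(Elf_Word)), so
 * elfround(5) == 8 and elfround(8) == 8.  PT_NOTE contents pad both
 * the name and the descriptor to this 4-byte boundary, e.g. the
 * 8-byte "OpenBSD\0" note name needs no padding.
 */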
126 
127 
128 /*
129  * Check header for validity; return 0 for ok, ENOEXEC if error
130  */
131 int
132 elf_check_header(Elf_Ehdr *ehdr)
133 {
134 	/*
135 	 * We need to check magic, class size, endianness, and version before
136 	 * we look at the rest of the Elf_Ehdr structure. These few elements
137 	 * are represented in a machine independent fashion.
138 	 */
139 	if (!IS_ELF(*ehdr) ||
140 	    ehdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
141 	    ehdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
142 	    ehdr->e_ident[EI_VERSION] != ELF_TARG_VER)
143 		return (ENOEXEC);
144 
145 	/* Now check the machine dependent header */
146 	if (ehdr->e_machine != ELF_TARG_MACH ||
147 	    ehdr->e_version != ELF_TARG_VER)
148 		return (ENOEXEC);
149 
150 	/* Don't allow an insane number of program headers. */
151 	if (ehdr->e_phnum > ELF_MAX_VALID_PHDR)
152 		return (ENOEXEC);
153 
154 	return (0);
155 }
156 
157 /*
158  * Load a psection at the appropriate address
159  */
160 void
161 elf_load_psection(struct exec_vmcmd_set *vcset, struct vnode *vp,
162     Elf_Phdr *ph, Elf_Addr *addr, Elf_Addr *size, int *prot, int flags)
163 {
164 	u_long msize, lsize, psize, rm, rf;
165 	long diff, offset, bdiff;
166 	Elf_Addr base;
167 
168 	/*
169 	 * If the user specified an address, then we load there.
170 	 */
171 	if (*addr != ELF_NO_ADDR) {
172 		if (ph->p_align > 1) {
173 			*addr = ELF_TRUNC(*addr, ph->p_align);
174 			diff = ph->p_vaddr - ELF_TRUNC(ph->p_vaddr, ph->p_align);
175 			/* page align vaddr */
176 			base = *addr + trunc_page(ph->p_vaddr)
177 			    - ELF_TRUNC(ph->p_vaddr, ph->p_align);
178 		} else {
179 			diff = 0;
180 			base = *addr + trunc_page(ph->p_vaddr) - ph->p_vaddr;
181 		}
182 	} else {
183 		*addr = ph->p_vaddr;
184 		if (ph->p_align > 1)
185 			*addr = ELF_TRUNC(*addr, ph->p_align);
186 		base = trunc_page(ph->p_vaddr);
187 		diff = ph->p_vaddr - *addr;
188 	}
189 	bdiff = ph->p_vaddr - trunc_page(ph->p_vaddr);
190 
191 	/*
192 	 * Enforce W^X and map W|X segments without X permission
193 	 * initially.  The dynamic linker will make these read-only
194 	 * and add back X permission after relocation processing.
195 	 * Static executables with W|X segments will probably crash.
196 	 */
197 	*prot |= (ph->p_flags & PF_R) ? PROT_READ : 0;
198 	*prot |= (ph->p_flags & PF_W) ? PROT_WRITE : 0;
199 	if ((ph->p_flags & PF_W) == 0)
200 		*prot |= (ph->p_flags & PF_X) ? PROT_EXEC : 0;
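	/*
	 * For illustration: a PF_R|PF_X segment is mapped
	 * PROT_READ|PROT_EXEC, while a PF_R|PF_W|PF_X segment is mapped
	 * PROT_READ|PROT_WRITE only; the withheld PROT_EXEC is restored
	 * by ld.so after relocation, as described above.
	 */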
201 
202 	/*
203 	 * Apply immutability as much as possible, but not text/rodata
204 	 * segments of textrel binaries, or RELRO or PT_OPENBSD_MUTABLE
205 	 * sections, or LOADS marked PF_OPENBSD_MUTABLE, or LOADS which
206 	 * violate W^X.
207 	 * Userland (meaning crt0 or ld.so) will repair those regions.
208 	 */
209 	if ((ph->p_flags & (PF_X | PF_W)) != (PF_X | PF_W) &&
210 	    ((ph->p_flags & PF_OPENBSD_MUTABLE) == 0))
211 		flags |= VMCMD_IMMUTABLE;
212 	if ((flags & VMCMD_TEXTREL) && (ph->p_flags & PF_W) == 0)
213 		flags &= ~VMCMD_IMMUTABLE;
214 
215 	msize = ph->p_memsz + diff;
216 	offset = ph->p_offset - bdiff;
217 	lsize = ph->p_filesz + bdiff;
218 	psize = round_page(lsize);
219 
220 	/*
221 	 * Because the pagedvn pager can't handle zero fill of the last
222 	 * data page if it's not page aligned, we map the last page readvn.
223 	 */
224 	if (ph->p_flags & PF_W) {
225 		psize = trunc_page(lsize);
226 		if (psize > 0)
227 			NEW_VMCMD2(vcset, vmcmd_map_pagedvn, psize, base, vp,
228 			    offset, *prot, flags);
229 		if (psize != lsize) {
230 			NEW_VMCMD2(vcset, vmcmd_map_readvn, lsize - psize,
231 			    base + psize, vp, offset + psize, *prot, flags);
232 		}
233 	} else {
234 		NEW_VMCMD2(vcset, vmcmd_map_pagedvn, psize, base, vp, offset,
235 		    *prot, flags);
236 	}
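	/*
	 * For illustration, with 4KB pages and a writable segment whose
	 * lsize is 0x2345: the first 0x2000 bytes are mapped with
	 * vmcmd_map_pagedvn and the trailing 0x345 bytes with
	 * vmcmd_map_readvn, which copies the data and zero-fills the
	 * rest of that last page.  A read-only segment is mapped
	 * entirely with vmcmd_map_pagedvn, rounded up to a full page.
	 */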
237 
238 	/*
239 	 * Check if we need to extend the size of the segment
240 	 */
241 	rm = round_page(*addr + ph->p_memsz + diff);
242 	rf = round_page(*addr + ph->p_filesz + diff);
243 
244 	if (rm != rf) {
245 		NEW_VMCMD2(vcset, vmcmd_map_zero, rm - rf, rf, NULLVP, 0,
246 		    *prot, flags);
247 	}
248 	*size = msize;
249 }
250 
251 /*
252  * Read from vnode into buffer at offset.
253  */
254 int
255 elf_read_from(struct proc *p, struct vnode *vp, u_long off, void *buf,
256     int size)
257 {
258 	int error;
259 	size_t resid;
260 
261 	if ((error = vn_rdwr(UIO_READ, vp, buf, size, off, UIO_SYSSPACE,
262 	    0, p->p_ucred, &resid, p)) != 0)
263 		return error;
264 	/*
265 	 * See if we got all of it
266 	 */
267 	if (resid != 0)
268 		return (ENOEXEC);
269 	return (0);
270 }
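/*
 * For illustration, this is how the callers below pull in the program
 * header table:
 *
 *	ph = mallocarray(eh.e_phnum, sizeof(Elf_Phdr), M_TEMP, M_WAITOK);
 *	phsize = eh.e_phnum * sizeof(Elf_Phdr);
 *	error = elf_read_from(p, vp, eh.e_phoff, ph, phsize);
 *
 * A short read (resid != 0) is treated like an I/O error and the exec
 * is rejected with ENOEXEC.
 */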
271 
272 /*
273  * Rebase the pin offsets inside a (base,len) window for the text segment only.
274  */
275 void
276 elf_adjustpins(vaddr_t *basep, size_t *lenp, u_int *pins, int npins, u_int offset)
277 {
278 	int i;
279 
280 	/* Adjust offsets, base, len */
281 	for (i = 0; i < npins; i++) {
282 		if (pins[i] == -1 || pins[i] == 0)
283 			continue;
284 		pins[i] -= offset;
285 	}
286 	*basep += offset;
287 	*lenp -= offset;
288 }
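/*
 * For illustration: with *basep == 0, *lenp == 0x5000 and offset ==
 * 0x1000 (the start of the text segment), a pin recorded at 0x1234
 * becomes 0x234, the window becomes base 0x1000 / len 0x4000, and the
 * sentinel values 0 (invalid) and -1 (allowed) are left untouched.
 */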
289 
290 int
291 elf_read_pintable(struct proc *p, struct vnode *vp, Elf_Phdr *pp,
292     u_int **pinp, int is_ldso, size_t len)
293 {
294 	struct pinsyscalls {
295 		u_int offset;
296 		u_int sysno;
297 	} *syscalls = NULL;
298 	int i, nsyscalls = 0, npins = 0;
299 	u_int *pins = NULL;
300 
301 	if (pp->p_filesz > SYS_MAXSYSCALL * 2 * sizeof(*syscalls) ||
302 	    pp->p_filesz % sizeof(*syscalls) != 0)
303 		goto bad;
304 	nsyscalls = pp->p_filesz / sizeof(*syscalls);
305 	syscalls = malloc(pp->p_filesz, M_PINSYSCALL, M_WAITOK);
306 	if (elf_read_from(p, vp, pp->p_offset, syscalls,
307 	    pp->p_filesz) != 0)
308 		goto bad;
309 
310 	/* Validate, and calculate pintable size */
311 	for (i = 0; i < nsyscalls; i++) {
312 		if (syscalls[i].sysno <= 0 ||
313 		    syscalls[i].sysno >= SYS_MAXSYSCALL ||
314 		    syscalls[i].offset > len) {
315 			npins = 0;
316 			goto bad;
317 		}
318 		npins = MAX(npins, syscalls[i].sysno);
319 	}
320 	if (is_ldso)
321 		npins = MAX(npins, SYS_kbind);	/* XXX see ld.so/loader.c */
322 	npins++;
323 
324 	/* Fill pintable: 0 = invalid, -1 = allowed, else offset from base */
325 	pins = mallocarray(npins, sizeof(u_int), M_PINSYSCALL, M_WAITOK|M_ZERO);
326 	for (i = 0; i < nsyscalls; i++) {
327 		if (pins[syscalls[i].sysno])
328 			pins[syscalls[i].sysno] = -1;	/* duplicated */
329 		else
330 			pins[syscalls[i].sysno] = syscalls[i].offset;
331 	}
332 	if (is_ldso)
333 		pins[SYS_kbind] = -1;	/* XXX see ld.so/loader.c */
334 	*pinp = pins;
335 	pins = NULL;
336 bad:
337 	free(syscalls, M_PINSYSCALL, nsyscalls * sizeof(*syscalls));
338 	free(pins, M_PINSYSCALL, npins * sizeof(u_int));
339 	return npins;
340 }
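/*
 * For illustration: if the PT_OPENBSD_SYSCALLS table contains a single
 * { offset 0x1200, SYS_exit } entry, the resulting pins[SYS_exit] is
 * 0x1200; a syscall listed at two different offsets gets -1 (allowed
 * from anywhere); a syscall never listed stays 0 (invalid).  For ld.so
 * itself, SYS_kbind is additionally forced to -1 as noted above.
 */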
341 
342 /*
343  * Load a file (interpreter/library) pointed to by path [stolen from
344  * coff_load_shlib()]. Made slightly generic so it might be used externally.
345  */
346 int
347 elf_load_file(struct proc *p, char *path, struct exec_package *epp,
348     struct elf_args *ap)
349 {
350 	int error, i;
351 	struct nameidata nd;
352 	Elf_Ehdr eh;
353 	Elf_Phdr *ph = NULL, *syscall_ph = NULL;
354 	u_long phsize = 0;
355 	Elf_Addr addr;
356 	struct vnode *vp;
357 	Elf_Phdr *base_ph = NULL;
358 	struct interp_ld_sec {
359 		Elf_Addr vaddr;
360 		u_long memsz;
361 	} loadmap[ELF_MAX_VALID_PHDR];
362 	int nload, idx = 0;
363 	Elf_Addr pos;
364 	int file_align;
365 	int loop;
366 	size_t randomizequota = ELF_RANDOMIZE_LIMIT;
367 	vaddr_t text_start = -1, text_end = 0;
368 
369 	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, p);
370 	nd.ni_pledge = PLEDGE_RPATH;
371 	nd.ni_unveil = UNVEIL_READ;
372 	if ((error = namei(&nd)) != 0) {
373 		return (error);
374 	}
375 	vp = nd.ni_vp;
376 	if (vp->v_type != VREG) {
377 		error = EACCES;
378 		goto bad;
379 	}
380 	if ((error = VOP_GETATTR(vp, epp->ep_vap, p->p_ucred, p)) != 0)
381 		goto bad;
382 	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
383 		error = EACCES;
384 		goto bad;
385 	}
386 	if ((error = VOP_ACCESS(vp, VREAD, p->p_ucred, p)) != 0)
387 		goto bad1;
388 	if ((error = elf_read_from(p, nd.ni_vp, 0, &eh, sizeof(eh))) != 0)
389 		goto bad1;
390 
391 	if (elf_check_header(&eh) || eh.e_type != ET_DYN) {
392 		error = ENOEXEC;
393 		goto bad1;
394 	}
395 
396 	ph = mallocarray(eh.e_phnum, sizeof(Elf_Phdr), M_TEMP, M_WAITOK);
397 	phsize = eh.e_phnum * sizeof(Elf_Phdr);
398 
399 	if ((error = elf_read_from(p, nd.ni_vp, eh.e_phoff, ph, phsize)) != 0)
400 		goto bad1;
401 
402 	for (i = 0; i < eh.e_phnum; i++) {
403 		if ((ph[i].p_align > 1) && !powerof2(ph[i].p_align)) {
404 			error = EINVAL;
405 			goto bad1;
406 		}
407 
408 		if (ph[i].p_type == PT_LOAD) {
409 			if (ph[i].p_filesz > ph[i].p_memsz ||
410 			    ph[i].p_memsz == 0) {
411 				error = EINVAL;
412 				goto bad1;
413 			}
414 			loadmap[idx].vaddr = trunc_page(ph[i].p_vaddr);
415 			loadmap[idx].memsz = round_page (ph[i].p_vaddr +
416 			    ph[i].p_memsz - loadmap[idx].vaddr);
417 			file_align = ph[i].p_align;
418 			idx++;
419 		}
420 	}
421 	nload = idx;
422 
423 	/*
424 	 * Load the interpreter where a non-fixed mmap(NULL, ...)
425 	 * would (i.e. something safely out of the way).
426 	 */
427 	pos = uvm_map_hint(p->p_vmspace, PROT_EXEC, VM_MIN_ADDRESS,
428 	    VM_MAXUSER_ADDRESS);
429 	pos = ELF_ROUND(pos, file_align);
430 
431 	loop = 0;
432 	for (i = 0; i < nload;/**/) {
433 		vaddr_t	addr;
434 		struct	uvm_object *uobj;
435 		off_t	uoff;
436 		size_t	size;
437 
438 #ifdef this_needs_fixing
439 		if (i == 0) {
440 			uobj = &vp->v_uvm.u_obj;
441 			/* need to fix uoff */
442 		} else {
443 #endif
444 			uobj = NULL;
445 			uoff = 0;
446 #ifdef this_needs_fixing
447 		}
448 #endif
449 
450 		addr = trunc_page(pos + loadmap[i].vaddr);
451 		size =  round_page(addr + loadmap[i].memsz) - addr;
452 
453 		/* CRAP - map_findspace does not avoid daddr+BRKSIZ */
454 		if ((addr + size > (vaddr_t)p->p_vmspace->vm_daddr) &&
455 		    (addr < (vaddr_t)p->p_vmspace->vm_daddr + BRKSIZ))
456 			addr = round_page((vaddr_t)p->p_vmspace->vm_daddr +
457 			    BRKSIZ);
458 
459 		if (uvm_map_mquery(&p->p_vmspace->vm_map, &addr, size,
460 		    (i == 0 ? uoff : UVM_UNKNOWN_OFFSET), 0) != 0) {
461 			if (loop == 0) {
462 				loop = 1;
463 				i = 0;
464 				pos = 0;
465 				continue;
466 			}
467 			error = ENOMEM;
468 			goto bad1;
469 		}
470 		if (addr != pos + loadmap[i].vaddr) {
471 			/* base changed. */
472 			pos = addr - trunc_page(loadmap[i].vaddr);
473 			pos = ELF_ROUND(pos,file_align);
474 			i = 0;
475 			continue;
476 		}
477 
478 		i++;
479 	}
480 
481 	/*
482 	 * Load all the necessary sections
483 	 */
484 	for (i = 0; i < eh.e_phnum; i++) {
485 		Elf_Addr size = 0;
486 		int prot = 0;
487 		int flags;
488 
489 		switch (ph[i].p_type) {
490 		case PT_LOAD:
491 			if (base_ph == NULL) {
492 				flags = VMCMD_BASE;
493 				addr = pos;
494 				base_ph = &ph[i];
495 			} else {
496 				flags = VMCMD_RELATIVE;
497 				addr = ph[i].p_vaddr - base_ph->p_vaddr;
498 			}
499 			elf_load_psection(&epp->ep_vmcmds, nd.ni_vp,
500 			    &ph[i], &addr, &size, &prot, flags);
501 			/* If entry is within this section it must be text */
502 			if (eh.e_entry >= ph[i].p_vaddr &&
503 			    eh.e_entry < (ph[i].p_vaddr + size)) {
504 				/* LOAD containing e_entry may not be writable */
505 				if (prot & PROT_WRITE) {
506 					error = ENOEXEC;
507 					goto bad1;
508 				}
509  				epp->ep_entry = addr + eh.e_entry -
510 				    ELF_TRUNC(ph[i].p_vaddr,ph[i].p_align);
511 				if (flags == VMCMD_RELATIVE)
512 					epp->ep_entry += pos;
513 				ap->arg_interp = pos;
514 			}
515 			if (prot & PROT_EXEC) {
516 				if (addr < text_start)
517 					text_start = addr;
518 				if (addr+size >= text_end)
519 					text_end = addr + size;
520 			}
521 			addr += size;
522 			break;
523 
524 		case PT_PHDR:
525 		case PT_NOTE:
526 			break;
527 
528 		case PT_OPENBSD_RANDOMIZE:
529 			if (ph[i].p_memsz > randomizequota) {
530 				error = ENOMEM;
531 				goto bad1;
532 			}
533 			randomizequota -= ph[i].p_memsz;
534 			NEW_VMCMD(&epp->ep_vmcmds, vmcmd_randomize,
535 			    ph[i].p_memsz, ph[i].p_vaddr + pos, NULLVP, 0, 0);
536 			break;
537 
538 		case PT_DYNAMIC:
539 #if defined (__mips__)
540 			/* DT_DEBUG is not ready on mips */
541 			NEW_VMCMD(&epp->ep_vmcmds, vmcmd_mutable,
542 			    ph[i].p_memsz, ph[i].p_vaddr + pos, NULLVP, 0, 0);
543 #endif
544 			break;
545 		case PT_GNU_RELRO:
546 		case PT_OPENBSD_MUTABLE:
547 			NEW_VMCMD(&epp->ep_vmcmds, vmcmd_mutable,
548 			    ph[i].p_memsz, ph[i].p_vaddr + pos, NULLVP, 0, 0);
549 			break;
550 		case PT_OPENBSD_SYSCALLS:
551 			syscall_ph = &ph[i];
552 			break;
553 		default:
554 			break;
555 		}
556 	}
557 
558 	if (syscall_ph) {
559 		struct process *pr = p->p_p;
560 		vaddr_t base = pos;
561 		size_t len = text_end;
562 		u_int *pins;
563 		int npins;
564 
565 		npins = elf_read_pintable(p, nd.ni_vp, syscall_ph,
566 		    &pins, 1, len);
567 		if (npins) {
568 			elf_adjustpins(&base, &len, pins, npins,
569 			    text_start);
570 			pr->ps_pin.pn_start = base;
571 			pr->ps_pin.pn_end = base + len;
572 			pr->ps_pin.pn_pins = pins;
573 			pr->ps_pin.pn_npins = npins;
574 		}
575 	} else {
576 		error = EINVAL;	/* no pin table */
577 		goto bad1;
578 	}
579 
580 	vn_marktext(nd.ni_vp);
581 
582 bad1:
583 	VOP_CLOSE(nd.ni_vp, FREAD, p->p_ucred, p);
584 bad:
585 	free(ph, M_TEMP, phsize);
586 
587 	vput(nd.ni_vp);
588 	return (error);
589 }
590 
591 /*
592  * Prepare an Elf binary's exec package
593  *
594  * First, set up the various offsets/lengths in the exec package.
595  *
596  * Then, mark the text image busy (so it can be demand paged) or error out if
597  * this is not possible.  Finally, set up vmcmds for the text, data, bss, and
598  * stack segments.
599  */
600 int
601 exec_elf_makecmds(struct proc *p, struct exec_package *epp)
602 {
603 	Elf_Ehdr *eh = epp->ep_hdr;
604 	Elf_Phdr *ph, *pp, *base_ph = NULL, *syscall_ph = NULL;
605 	Elf_Addr phdr = 0, exe_base = 0, exe_end = 0;
606 	int error, i, has_phdr = 0, names = 0, textrel = 0;
607 	char *interp = NULL;
608 	u_long phsize;
609 	size_t randomizequota = ELF_RANDOMIZE_LIMIT;
610 
611 	if (epp->ep_hdrvalid < sizeof(Elf_Ehdr))
612 		return (ENOEXEC);
613 
614 	if (elf_check_header(eh) ||
615 	   (eh->e_type != ET_EXEC && eh->e_type != ET_DYN))
616 		return (ENOEXEC);
617 
618 	/*
619 	 * Check if the vnode is open for writing, because we want to demand-
620 	 * page out of it.  If it is, don't do it, for various reasons.
621 	 */
622 	if (epp->ep_vp->v_writecount != 0) {
623 #ifdef DIAGNOSTIC
624 		if (epp->ep_vp->v_flag & VTEXT)
625 			panic("exec: a VTEXT vnode has writecount != 0");
626 #endif
627 		return (ETXTBSY);
628 	}
629 	/*
630 	 * Allocate space to hold all the program headers, and read them
631 	 * from the file
632 	 */
633 	ph = mallocarray(eh->e_phnum, sizeof(Elf_Phdr), M_TEMP, M_WAITOK);
634 	phsize = eh->e_phnum * sizeof(Elf_Phdr);
635 
636 	if ((error = elf_read_from(p, epp->ep_vp, eh->e_phoff, ph,
637 	    phsize)) != 0)
638 		goto bad;
639 
640 	epp->ep_tsize = ELF_NO_ADDR;
641 	epp->ep_dsize = ELF_NO_ADDR;
642 
643 	for (i = 0, pp = ph; i < eh->e_phnum; i++, pp++) {
644 		if ((pp->p_align > 1) && !powerof2(pp->p_align)) {
645 			error = EINVAL;
646 			goto bad;
647 		}
648 
649 		if (pp->p_type == PT_INTERP && !interp) {
650 			if (pp->p_filesz < 2 || pp->p_filesz > MAXPATHLEN)
651 				goto bad;
652 			interp = pool_get(&namei_pool, PR_WAITOK);
653 			if ((error = elf_read_from(p, epp->ep_vp,
654 			    pp->p_offset, interp, pp->p_filesz)) != 0) {
655 				goto bad;
656 			}
657 			if (interp[pp->p_filesz - 1] != '\0')
658 				goto bad;
659 		} else if (pp->p_type == PT_LOAD) {
660 			if (pp->p_filesz > pp->p_memsz ||
661 			    pp->p_memsz == 0) {
662 				error = EINVAL;
663 				goto bad;
664 			}
665 			if (base_ph == NULL)
666 				base_ph = pp;
667 		} else if (pp->p_type == PT_PHDR) {
668 			has_phdr = 1;
669 		}
670 	}
671 
672 	/*
673 	 * Verify this is an OpenBSD executable.  If it's marked that way
674 	 * via a PT_NOTE then also check for a PT_OPENBSD_WXNEEDED segment.
675 	 */
676 	if ((error = elf_os_pt_note(p, epp, epp->ep_hdr, &names)) != 0)
677 		goto bad;
678 	if (eh->e_ident[EI_OSABI] == ELFOSABI_OPENBSD)
679 		names |= ELF_NOTE_NAME_OPENBSD;
680 
681 	if (eh->e_type == ET_DYN) {
682 		/* need phdr and load sections for PIE */
683 		if (!has_phdr || base_ph == NULL || base_ph->p_vaddr != 0) {
684 			error = EINVAL;
685 			goto bad;
686 		}
687 		/* randomize exe_base for PIE */
688 		exe_base = uvm_map_pie(base_ph->p_align);
689 
690 		/*
691 		 * Check if DYNAMIC contains DT_TEXTREL
692 		 */
693 		for (i = 0, pp = ph; i < eh->e_phnum; i++, pp++) {
694 			Elf_Dyn *dt;
695 			int j;
696 
697 			switch (pp->p_type) {
698 			case PT_DYNAMIC:
699 				if (pp->p_filesz > 64*1024)
700 					break;
701 				dt = malloc(pp->p_filesz, M_TEMP, M_WAITOK);
702 				error = vn_rdwr(UIO_READ, epp->ep_vp,
703 				    (caddr_t)dt, pp->p_filesz, pp->p_offset,
704 				    UIO_SYSSPACE, IO_UNIT, p->p_ucred, NULL, p);
705 				if (error) {
706 					free(dt, M_TEMP, pp->p_filesz);
707 					break;
708 				}
709 				for (j = 0; j < pp->p_filesz / sizeof(*dt); j++) {
710 					if (dt[j].d_tag == DT_TEXTREL) {
711 						textrel = VMCMD_TEXTREL;
712 						break;
713 					}
714 				}
715 				free(dt, M_TEMP, pp->p_filesz);
716 				break;
717 			default:
718 				break;
719 			}
720 		}
721 	}
722 
723 	/*
724 	 * Load all the necessary sections
725 	 */
726 	for (i = 0, pp = ph; i < eh->e_phnum; i++, pp++) {
727 		Elf_Addr addr, size = 0;
728 		int prot = 0;
729 		int flags = 0;
730 
731 		switch (pp->p_type) {
732 		case PT_LOAD:
733 			if (exe_base != 0) {
734 				if (pp == base_ph) {
735 					flags = VMCMD_BASE;
736 					addr = exe_base;
737 				} else {
738 					flags = VMCMD_RELATIVE;
739 					addr = pp->p_vaddr - base_ph->p_vaddr;
740 				}
741 			} else
742 				addr = ELF_NO_ADDR;
743 
744 			/* Static binaries may not call pinsyscalls() */
745 			if (interp == NULL)
746 				p->p_vmspace->vm_map.flags |= VM_MAP_PINSYSCALL_ONCE;
747 
748 			/*
749 			 * Calculate the size of the text and data segments
750 			 * by starting at the first and going to the end of
751 			 * the last.  'rwx' sections are treated as data.
752 			 * This is correct for BSS_PLT, but may not be for
753 			 * DATA_PLT; it is fine for TEXT_PLT.
754 			 */
755 			elf_load_psection(&epp->ep_vmcmds, epp->ep_vp,
756 			    pp, &addr, &size, &prot, flags | textrel);
757 
758 			/*
759 			 * Update exe_base in case alignment was off.
760 			 * For PIE, addr is relative to exe_base so
761 			 * adjust it (non PIE exe_base is 0 so no change).
762 			 */
763 			if (flags == VMCMD_BASE)
764 				exe_base = addr;
765 			else
766 				addr += exe_base;
767 
768 			/*
769 			 * Decide whether it's text or data by looking
770 			 * at the protection of the section
771 			 */
772 			if (prot & PROT_WRITE) {
773 				/* data section */
774 				if (epp->ep_dsize == ELF_NO_ADDR) {
775 					epp->ep_daddr = addr;
776 					epp->ep_dsize = size;
777 				} else {
778 					if (addr < epp->ep_daddr) {
779 						epp->ep_dsize =
780 						    epp->ep_dsize +
781 						    epp->ep_daddr -
782 						    addr;
783 						epp->ep_daddr = addr;
784 					} else
785 						epp->ep_dsize = addr+size -
786 						    epp->ep_daddr;
787 				}
788 			} else if (prot & PROT_EXEC) {
789 				/* text section */
790 				if (epp->ep_tsize == ELF_NO_ADDR) {
791 					epp->ep_taddr = addr;
792 					epp->ep_tsize = size;
793 				} else {
794 					if (addr < epp->ep_taddr) {
795 						epp->ep_tsize =
796 						    epp->ep_tsize +
797 						    epp->ep_taddr -
798 						    addr;
799 						epp->ep_taddr = addr;
800 					} else
801 						epp->ep_tsize = addr+size -
802 						    epp->ep_taddr;
803 				}
804 				if (interp == NULL)
805 					exe_end = epp->ep_taddr +
806 					    epp->ep_tsize;	/* end of TEXT */
807 			}
808 			break;
809 
810 		case PT_SHLIB:
811 			error = ENOEXEC;
812 			goto bad;
813 
814 		case PT_INTERP:
815 			/* Already did this one */
816 		case PT_NOTE:
817 			break;
818 
819 		case PT_PHDR:
820 			/* Note address of program headers (in text segment) */
821 			phdr = pp->p_vaddr;
822 			break;
823 
824 		case PT_OPENBSD_RANDOMIZE:
825 			if (ph[i].p_memsz > randomizequota) {
826 				error = ENOMEM;
827 				goto bad;
828 			}
829 			randomizequota -= ph[i].p_memsz;
830 			NEW_VMCMD(&epp->ep_vmcmds, vmcmd_randomize,
831 			    ph[i].p_memsz, ph[i].p_vaddr + exe_base, NULLVP, 0, 0);
832 			break;
833 
834 		case PT_DYNAMIC:
835 #if defined (__mips__)
836 			/* DT_DEBUG is not ready on mips */
837 			NEW_VMCMD(&epp->ep_vmcmds, vmcmd_mutable,
838 			    ph[i].p_memsz, ph[i].p_vaddr + exe_base, NULLVP, 0, 0);
839 #endif
840 			break;
841 		case PT_GNU_RELRO:
842 		case PT_OPENBSD_MUTABLE:
843 			NEW_VMCMD(&epp->ep_vmcmds, vmcmd_mutable,
844 			    ph[i].p_memsz, ph[i].p_vaddr + exe_base, NULLVP, 0, 0);
845 			break;
846 		case PT_OPENBSD_SYSCALLS:
847 			if (interp == NULL)
848 				syscall_ph = &ph[i];
849 			break;
850 		default:
851 			/*
852 			 * Not fatal, we don't need to understand everything
853 			 * :-)
854 			 */
855 			break;
856 		}
857 	}
858 
859 	if (syscall_ph) {
860 		vaddr_t base = exe_base;
861 		size_t len = exe_end - exe_base;
862 		u_int *pins;
863 		int npins;
864 
865 		npins = elf_read_pintable(p, epp->ep_vp, syscall_ph,
866 		    &pins, 0, len);
867 		if (npins) {
868 			elf_adjustpins(&base, &len, pins, npins,
869 			    epp->ep_taddr - exe_base);
870 			epp->ep_pinstart = base;
871 			epp->ep_pinend = base + len;
872 			epp->ep_pins = pins;
873 			epp->ep_npins = npins;
874 		}
875 	}
876 
877 	phdr += exe_base;
878 
879 	/*
880 	 * Strangely, some Linux programs may have all load sections marked
881 	 * writable; in this case, textsize is not -1, but rather 0.
882 	 */
883 	if (epp->ep_tsize == ELF_NO_ADDR)
884 		epp->ep_tsize = 0;
885 	/*
886 	 * Another possibility is that it has all load sections marked
887 	 * read-only.  Fake a zero-sized data segment right after the
888 	 * text segment.
889 	 */
890 	if (epp->ep_dsize == ELF_NO_ADDR) {
891 		epp->ep_daddr = round_page(epp->ep_taddr + epp->ep_tsize);
892 		epp->ep_dsize = 0;
893 	}
894 
895 	epp->ep_interp = interp;
896 	epp->ep_entry = eh->e_entry + exe_base;
897 
898 	/*
899 	 * Check if we found a dynamically linked binary and arrange to load
900 	 * its interpreter when the exec file is released.
901 	 */
902 	if (interp || eh->e_type == ET_DYN) {
903 		struct elf_args *ap;
904 
905 		ap = malloc(sizeof(*ap), M_TEMP, M_WAITOK);
906 
907 		ap->arg_phaddr = phdr;
908 		ap->arg_phentsize = eh->e_phentsize;
909 		ap->arg_phnum = eh->e_phnum;
910 		ap->arg_entry = eh->e_entry + exe_base;
911 		ap->arg_interp = exe_base;
912 
913 		epp->ep_args = ap;
914 	}
915 
916 	free(ph, M_TEMP, phsize);
917 	vn_marktext(epp->ep_vp);
918 	return (exec_setup_stack(p, epp));
919 
920 bad:
921 	if (interp)
922 		pool_put(&namei_pool, interp);
923 	free(ph, M_TEMP, phsize);
924 	kill_vmcmds(&epp->ep_vmcmds);
925 	if (error == 0)
926 		return (ENOEXEC);
927 	return (error);
928 }
929 
930 #ifdef __HAVE_CPU_HWCAP
931 unsigned long hwcap;
932 #endif /* __HAVE_CPU_HWCAP */
933 
934 #ifdef __HAVE_CPU_HWCAP2
935 unsigned long hwcap2;
936 #endif /* __HAVE_CPU_HWCAP2 */
937 
938 /*
939  * Phase II of load. It is now safe to load the interpreter. Info collected
940  * when loading the program is available for setup of the interpreter.
941  */
942 int
943 exec_elf_fixup(struct proc *p, struct exec_package *epp)
944 {
945 	char	*interp;
946 	int	error = 0;
947 	struct	elf_args *ap;
948 	AuxInfo ai[ELF_AUX_ENTRIES], *a;
949 
950 	ap = epp->ep_args;
951 	if (ap == NULL) {
952 		return (0);
953 	}
954 
955 	interp = epp->ep_interp;
956 
957 	/* disable kbind in programs that don't use ld.so */
958 	if (interp == NULL)
959 		p->p_p->ps_kbind_addr = BOGO_PC;
960 
961 	if (interp &&
962 	    (error = elf_load_file(p, interp, epp, ap)) != 0) {
963 		uprintf("execve: cannot load %s\n", interp);
964 		free(ap, M_TEMP, sizeof *ap);
965 		pool_put(&namei_pool, interp);
966 		kill_vmcmds(&epp->ep_vmcmds);
967 		return (error);
968 	}
969 	/*
970 	 * We have to do this ourselves...
971 	 */
972 	error = exec_process_vmcmds(p, epp);
973 
974 	/*
975 	 * Push extra arguments on the stack needed by dynamically
976 	 * linked binaries
977 	 */
978 	if (error == 0) {
979 		memset(&ai, 0, sizeof ai);
980 		a = ai;
981 
982 		a->au_id = AUX_phdr;
983 		a->au_v = ap->arg_phaddr;
984 		a++;
985 
986 		a->au_id = AUX_phent;
987 		a->au_v = ap->arg_phentsize;
988 		a++;
989 
990 		a->au_id = AUX_phnum;
991 		a->au_v = ap->arg_phnum;
992 		a++;
993 
994 		a->au_id = AUX_pagesz;
995 		a->au_v = PAGE_SIZE;
996 		a++;
997 
998 		a->au_id = AUX_base;
999 		a->au_v = ap->arg_interp;
1000 		a++;
1001 
1002 		a->au_id = AUX_flags;
1003 		a->au_v = 0;
1004 		a++;
1005 
1006 		a->au_id = AUX_entry;
1007 		a->au_v = ap->arg_entry;
1008 		a++;
1009 
1010 #ifdef __HAVE_CPU_HWCAP
1011 		a->au_id = AUX_hwcap;
1012 		a->au_v = hwcap;
1013 		a++;
1014 #endif /* __HAVE_CPU_HWCAP */
1015 
1016 #ifdef __HAVE_CPU_HWCAP2
1017 		a->au_id = AUX_hwcap2;
1018 		a->au_v = hwcap2;
1019 		a++;
1020 #endif /* __HAVE_CPU_HWCAP2 */
1021 
1022 		a->au_id = AUX_openbsd_timekeep;
1023 		a->au_v = p->p_p->ps_timekeep;
1024 		a++;
1025 
1026 		a->au_id = AUX_null;
1027 		a->au_v = 0;
1028 		a++;
1029 
1030 		error = copyout(ai, epp->ep_auxinfo, sizeof ai);
1031 	}
1032 	free(ap, M_TEMP, sizeof *ap);
1033 	if (interp)
1034 		pool_put(&namei_pool, interp);
1035 	return (error);
1036 }
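/*
 * For illustration, the auxiliary vector copied out above is what
 * ld.so(1) walks at startup; ignoring the optional hwcap entries it
 * contains:
 *
 *	AUX_phdr		address of the program headers
 *	AUX_phent		sizeof(Elf_Phdr)
 *	AUX_phnum		number of program headers
 *	AUX_pagesz		PAGE_SIZE
 *	AUX_base		interpreter (or PIE) load base
 *	AUX_flags		0
 *	AUX_entry		program entry point
 *	AUX_openbsd_timekeep	timekeep page used by libc
 *	AUX_null		terminator
 */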
1037 
1038 int
1039 elf_os_pt_note_name(Elf_Note *np)
1040 {
1041 	int i, j;
1042 
1043 	for (i = 0; i < nitems(elf_note_names); i++) {
1044 		size_t namlen = strlen(elf_note_names[i].name);
1045 		if (np->namesz < namlen)
1046 			continue;
1047 		/* verify name padding (after the NUL) is NUL */
1048 		for (j = namlen + 1; j < elfround(np->namesz); j++)
1049 			if (((char *)(np + 1))[j] != '\0')
1050 				continue;
1051 		/* verify desc padding is NUL */
1052 		for (j = np->descsz; j < elfround(np->descsz); j++)
1053 			if (((char *)(np + 1))[j] != '\0')
1054 				continue;
1055 		if (strcmp((char *)(np + 1), elf_note_names[i].name) == 0)
1056 			return elf_note_names[i].id;
1057 	}
1058 	return (0);
1059 }
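/*
 * For illustration: OpenBSD binaries carry a .note.openbsd.ident
 * PT_NOTE whose name string is "OpenBSD\0" (namesz 8); any note whose
 * name matches an entry in elf_note_names[] above contributes that
 * entry's id (here ELF_NOTE_NAME_OPENBSD) to the caller's name mask.
 */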
1060 
1061 int
1062 elf_os_pt_note(struct proc *p, struct exec_package *epp, Elf_Ehdr *eh, int *namesp)
1063 {
1064 	Elf_Phdr *hph, *ph;
1065 	Elf_Note *np = NULL;
1066 	size_t phsize, offset, pfilesz = 0, total;
1067 	int error, names = 0;
1068 
1069 	hph = mallocarray(eh->e_phnum, sizeof(Elf_Phdr), M_TEMP, M_WAITOK);
1070 	phsize = eh->e_phnum * sizeof(Elf_Phdr);
1071 	if ((error = elf_read_from(p, epp->ep_vp, eh->e_phoff,
1072 	    hph, phsize)) != 0)
1073 		goto out1;
1074 
1075 	for (ph = hph;  ph < &hph[eh->e_phnum]; ph++) {
1076 		if (ph->p_type == PT_OPENBSD_WXNEEDED) {
1077 			epp->ep_flags |= EXEC_WXNEEDED;
1078 			continue;
1079 		}
1080 		if (ph->p_type == PT_OPENBSD_NOBTCFI) {
1081 			epp->ep_flags |= EXEC_NOBTCFI;
1082 			continue;
1083 		}
1084 
1085 		if (ph->p_type != PT_NOTE || ph->p_filesz > 1024)
1086 			continue;
1087 
1088 		if (np && ph->p_filesz != pfilesz) {
1089 			free(np, M_TEMP, pfilesz);
1090 			np = NULL;
1091 		}
1092 		if (!np)
1093 			np = malloc(ph->p_filesz, M_TEMP, M_WAITOK);
1094 		pfilesz = ph->p_filesz;
1095 		if ((error = elf_read_from(p, epp->ep_vp, ph->p_offset,
1096 		    np, ph->p_filesz)) != 0)
1097 			goto out2;
1098 
1099 		for (offset = 0; offset < ph->p_filesz; offset += total) {
1100 			Elf_Note *np2 = (Elf_Note *)((char *)np + offset);
1101 
1102 			if (offset + sizeof(Elf_Note) > ph->p_filesz)
1103 				break;
1104 			total = sizeof(Elf_Note) + elfround(np2->namesz) +
1105 			    elfround(np2->descsz);
1106 			if (offset + total > ph->p_filesz)
1107 				break;
1108 			names |= elf_os_pt_note_name(np2);
1109 		}
1110 	}
1111 
1112 out2:
1113 	free(np, M_TEMP, pfilesz);
1114 out1:
1115 	free(hph, M_TEMP, phsize);
1116 	*namesp = names;
1117 	return ((names & ELF_NOTE_NAME_OPENBSD) ? 0 : ENOEXEC);
1118 }
1119 
1120 /*
1121  * Start of routines related to dumping core
1122  */
1123 
1124 #ifdef SMALL_KERNEL
1125 int
1126 coredump_elf(struct proc *p, void *cookie)
1127 {
1128 	return EPERM;
1129 }
1130 #else /* !SMALL_KERNEL */
1131 
1132 struct writesegs_state {
1133 	off_t	notestart;
1134 	off_t	secstart;
1135 	off_t	secoff;
1136 	struct	proc *p;
1137 	void	*iocookie;
1138 	Elf_Phdr *psections;
1139 	size_t	psectionslen;
1140 	size_t	notesize;
1141 	int	npsections;
1142 };
1143 
1144 uvm_coredump_setup_cb	coredump_setup_elf;
1145 uvm_coredump_walk_cb	coredump_walk_elf;
1146 
1147 int	coredump_notes_elf(struct proc *, void *, size_t *);
1148 int	coredump_note_elf(struct proc *, void *, size_t *);
1149 int	coredump_writenote_elf(struct proc *, void *, Elf_Note *,
1150 	    const char *, void *);
1151 
1152 extern vaddr_t sigcode_va;
1153 extern vsize_t sigcode_sz;
1154 
1155 int
1156 coredump_elf(struct proc *p, void *cookie)
1157 {
1158 #ifdef DIAGNOSTIC
1159 	off_t offset;
1160 #endif
1161 	struct writesegs_state ws;
1162 	size_t notesize;
1163 	int error, i;
1164 
1165 	ws.p = p;
1166 	ws.iocookie = cookie;
1167 	ws.psections = NULL;
1168 
1169 	/*
1170 	 * Walk the map to get all the segment offsets and lengths,
1171 	 * write out the ELF header.
1172 	 */
1173 	error = uvm_coredump_walkmap(p, coredump_setup_elf,
1174 	    coredump_walk_elf, &ws);
1175 	if (error)
1176 		goto out;
1177 
1178 	error = coredump_write(cookie, UIO_SYSSPACE, ws.psections,
1179 	    ws.psectionslen, 0);
1180 	if (error)
1181 		goto out;
1182 
1183 	/* Write out the notes. */
1184 	error = coredump_notes_elf(p, cookie, &notesize);
1185 	if (error)
1186 		goto out;
1187 
1188 #ifdef DIAGNOSTIC
1189 	if (notesize != ws.notesize)
1190 		panic("coredump: notesize changed: %zu != %zu",
1191 		    ws.notesize, notesize);
1192 	offset = ws.notestart + notesize;
1193 	if (offset != ws.secstart)
1194 		panic("coredump: offset %lld != secstart %lld",
1195 		    (long long) offset, (long long) ws.secstart);
1196 #endif
1197 
1198 	/* Pass 3: finally, write the sections themselves. */
1199 	for (i = 0; i < ws.npsections - 1; i++) {
1200 		Elf_Phdr *pent = &ws.psections[i];
1201 		if (pent->p_filesz == 0)
1202 			continue;
1203 
1204 #ifdef DIAGNOSTIC
1205 		if (offset != pent->p_offset)
1206 			panic("coredump: offset %lld != p_offset[%d] %lld",
1207 			    (long long) offset, i,
1208 			    (long long) pent->p_offset);
1209 #endif
1210 
1211 		/*
1212 		 * Since the sigcode is mapped execute-only, we can't
1213 		 * read it.  So use the kernel mapping for it instead.
1214 		 */
1215 		if (pent->p_vaddr == p->p_p->ps_sigcode &&
1216 		    pent->p_filesz == sigcode_sz) {
1217 			error = coredump_write(cookie, UIO_SYSSPACE,
1218 			    (void *)sigcode_va, sigcode_sz, 0);
1219 		} else {
1220 			error = coredump_write(cookie, UIO_USERSPACE,
1221 			    (void *)(vaddr_t)pent->p_vaddr, pent->p_filesz,
1222 			    (pent->p_flags & PF_ISVNODE));
1223 		}
1224 		if (error)
1225 			goto out;
1226 
1227 		coredump_unmap(cookie, (vaddr_t)pent->p_vaddr,
1228 		    (vaddr_t)pent->p_vaddr + pent->p_filesz);
1229 
1230 #ifdef DIAGNOSTIC
1231 		offset += ws.psections[i].p_filesz;
1232 #endif
1233 	}
1234 
1235 out:
1236 	free(ws.psections, M_TEMP, ws.psectionslen);
1237 	return (error);
1238 }
1239 
1240 
1241 /*
1242  * Normally we lay out core files like this:
1243  *	[ELF Header] [Program headers] [Notes] [data for PT_LOAD segments]
1244  *
1245  * However, if there are >= 65535 segments then the count overflows the field
1246  * in the ELF header, so the standard specifies putting a magic
1247  * number there and saving the real count in the .sh_info field of
1248  * the first *section* header...which requires generating a section
1249  * header.  To avoid confusing tools, we include an .shstrtab section
1250  * as well so all the indexes look valid.  So in this case we lay
1251  * out the core file like this:
1252  *	[ELF Header] [Section Headers] [.shstrtab] [Program headers] \
1253  *	[Notes] [data for PT_LOAD segments]
1254  *
1255  * The 'shstrtab' structure below is data for the second of the two
1256  * section headers, plus the .shstrtab itself, in one const buffer.
1257  */
1258 static const struct {
1259     Elf_Shdr	shdr;
1260     char	shstrtab[sizeof(ELF_SHSTRTAB) + 1];
1261 } shstrtab = {
1262     .shdr = {
1263 	.sh_name = 1,			/* offset in .shstrtab below */
1264 	.sh_type = SHT_STRTAB,
1265 	.sh_offset = sizeof(Elf_Ehdr) + 2*sizeof(Elf_Shdr),
1266 	.sh_size = sizeof(ELF_SHSTRTAB) + 1,
1267 	.sh_addralign = 1,
1268     },
1269     .shstrtab = "\0" ELF_SHSTRTAB,
1270 };
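/*
 * For illustration, on a 64-bit target the PN_XNUM layout set up below
 * is: the 64-byte Elf_Ehdr at offset 0, two 64-byte Elf_Shdr entries
 * at offset 64 (the first carries the real segment count in sh_info),
 * the 11-byte "\0.shstrtab" string table at offset 192, and the
 * program headers starting at offset 203, followed by the notes and
 * the segment data.
 */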
1271 
1272 int
1273 coredump_setup_elf(int segment_count, void *cookie)
1274 {
1275 	Elf_Ehdr ehdr;
1276 	struct writesegs_state *ws = cookie;
1277 	Elf_Phdr *note;
1278 	int error;
1279 
1280 	/* Get the count of segments, plus one for the PT_NOTE */
1281 	ws->npsections = segment_count + 1;
1282 
1283 	/* Get the size of the notes. */
1284 	error = coredump_notes_elf(ws->p, NULL, &ws->notesize);
1285 	if (error)
1286 		return error;
1287 
1288 	/* Setup the ELF header */
1289 	memset(&ehdr, 0, sizeof(ehdr));
1290 	memcpy(ehdr.e_ident, ELFMAG, SELFMAG);
1291 	ehdr.e_ident[EI_CLASS] = ELF_TARG_CLASS;
1292 	ehdr.e_ident[EI_DATA] = ELF_TARG_DATA;
1293 	ehdr.e_ident[EI_VERSION] = EV_CURRENT;
1294 	/* XXX Should be the OSABI/ABI version of the executable. */
1295 	ehdr.e_ident[EI_OSABI] = ELFOSABI_SYSV;
1296 	ehdr.e_ident[EI_ABIVERSION] = 0;
1297 	ehdr.e_type = ET_CORE;
1298 	/* XXX This should be the e_machine of the executable. */
1299 	ehdr.e_machine = ELF_TARG_MACH;
1300 	ehdr.e_version = EV_CURRENT;
1301 	ehdr.e_entry = 0;
1302 	ehdr.e_flags = 0;
1303 	ehdr.e_ehsize = sizeof(ehdr);
1304 	ehdr.e_phentsize = sizeof(Elf_Phdr);
1305 
1306 	if (ws->npsections < PN_XNUM) {
1307 		ehdr.e_phoff = sizeof(ehdr);
1308 		ehdr.e_shoff = 0;
1309 		ehdr.e_phnum = ws->npsections;
1310 		ehdr.e_shentsize = 0;
1311 		ehdr.e_shnum = 0;
1312 		ehdr.e_shstrndx = 0;
1313 	} else {
1314 		/* too many segments, use extension setup */
1315 		ehdr.e_shoff = sizeof(ehdr);
1316 		ehdr.e_phnum = PN_XNUM;
1317 		ehdr.e_shentsize = sizeof(Elf_Shdr);
1318 		ehdr.e_shnum = 2;
1319 		ehdr.e_shstrndx = 1;
1320 		ehdr.e_phoff = shstrtab.shdr.sh_offset + shstrtab.shdr.sh_size;
1321 	}
1322 
1323 	/* Write out the ELF header. */
1324 	error = coredump_write(ws->iocookie, UIO_SYSSPACE, &ehdr, sizeof(ehdr), 0);
1325 	if (error)
1326 		return error;
1327 
1328 	/*
1329 	 * If a section header is needed to store extension info, write
1330 	 * it out after the ELF header and before the program headers.
1331 	 */
1332 	if (ehdr.e_shnum != 0) {
1333 		Elf_Shdr shdr = { .sh_info = ws->npsections };
1334 		error = coredump_write(ws->iocookie, UIO_SYSSPACE, &shdr,
1335 		    sizeof shdr, 0);
1336 		if (error)
1337 			return error;
1338 		error = coredump_write(ws->iocookie, UIO_SYSSPACE, &shstrtab,
1339 		    sizeof(shstrtab.shdr) + sizeof(shstrtab.shstrtab), 0);
1340 		if (error)
1341 			return error;
1342 	}
1343 
1344 	/*
1345 	 * Allocate the segment header array and set up to collect
1346 	 * the section sizes and offsets
1347 	 */
1348 	ws->psections = mallocarray(ws->npsections, sizeof(Elf_Phdr),
1349 	    M_TEMP, M_WAITOK|M_CANFAIL|M_ZERO);
1350 	if (ws->psections == NULL)
1351 		return ENOMEM;
1352 	ws->psectionslen = ws->npsections * sizeof(Elf_Phdr);
1353 
1354 	ws->notestart = ehdr.e_phoff + ws->psectionslen;
1355 	ws->secstart = ws->notestart + ws->notesize;
1356 	ws->secoff = ws->secstart;
1357 
1358 	/* Fill in the PT_NOTE segment header in the last slot */
1359 	note = &ws->psections[ws->npsections - 1];
1360 	note->p_type = PT_NOTE;
1361 	note->p_offset = ws->notestart;
1362 	note->p_vaddr = 0;
1363 	note->p_paddr = 0;
1364 	note->p_filesz = ws->notesize;
1365 	note->p_memsz = 0;
1366 	note->p_flags = PF_R;
1367 	note->p_align = ELFROUNDSIZE;
1368 
1369 	return (0);
1370 }
1371 
1372 int
1373 coredump_walk_elf(vaddr_t start, vaddr_t realend, vaddr_t end, vm_prot_t prot,
1374     int isvnode, int nsegment, void *cookie)
1375 {
1376 	struct writesegs_state *ws = cookie;
1377 	Elf_Phdr phdr;
1378 	vsize_t size, realsize;
1379 
1380 	size = end - start;
1381 	realsize = realend - start;
1382 
1383 	phdr.p_type = PT_LOAD;
1384 	phdr.p_offset = ws->secoff;
1385 	phdr.p_vaddr = start;
1386 	phdr.p_paddr = 0;
1387 	phdr.p_filesz = realsize;
1388 	phdr.p_memsz = size;
1389 	phdr.p_flags = 0;
1390 	if (prot & PROT_READ)
1391 		phdr.p_flags |= PF_R;
1392 	if (prot & PROT_WRITE)
1393 		phdr.p_flags |= PF_W;
1394 	if (prot & PROT_EXEC)
1395 		phdr.p_flags |= PF_X;
1396 	if (isvnode)
1397 		phdr.p_flags |= PF_ISVNODE;
1398 	phdr.p_align = PAGE_SIZE;
1399 
1400 	ws->secoff += phdr.p_filesz;
1401 	ws->psections[nsegment] = phdr;
1402 
1403 	return (0);
1404 }
1405 
1406 int
1407 coredump_notes_elf(struct proc *p, void *iocookie, size_t *sizep)
1408 {
1409 	struct elfcore_procinfo cpi;
1410 	Elf_Note nhdr;
1411 	struct process *pr = p->p_p;
1412 	struct proc *q;
1413 	size_t size, notesize;
1414 	int error;
1415 
1416 	KASSERT(!P_HASSIBLING(p) || pr->ps_single != NULL);
1417 	size = 0;
1418 
1419 	/* First, write an elfcore_procinfo. */
1420 	notesize = sizeof(nhdr) + elfround(sizeof("OpenBSD")) +
1421 	    elfround(sizeof(cpi));
1422 	if (iocookie) {
1423 		memset(&cpi, 0, sizeof(cpi));
1424 
1425 		cpi.cpi_version = ELFCORE_PROCINFO_VERSION;
1426 		cpi.cpi_cpisize = sizeof(cpi);
1427 		cpi.cpi_signo = p->p_sisig;
1428 		cpi.cpi_sigcode = p->p_sicode;
1429 
1430 		cpi.cpi_sigpend = p->p_siglist | pr->ps_siglist;
1431 		cpi.cpi_sigmask = p->p_sigmask;
1432 		cpi.cpi_sigignore = pr->ps_sigacts->ps_sigignore;
1433 		cpi.cpi_sigcatch = pr->ps_sigacts->ps_sigcatch;
1434 
1435 		cpi.cpi_pid = pr->ps_pid;
1436 		cpi.cpi_ppid = pr->ps_ppid;
1437 		cpi.cpi_pgrp = pr->ps_pgid;
1438 		if (pr->ps_session->s_leader)
1439 			cpi.cpi_sid = pr->ps_session->s_leader->ps_pid;
1440 		else
1441 			cpi.cpi_sid = 0;
1442 
1443 		cpi.cpi_ruid = p->p_ucred->cr_ruid;
1444 		cpi.cpi_euid = p->p_ucred->cr_uid;
1445 		cpi.cpi_svuid = p->p_ucred->cr_svuid;
1446 
1447 		cpi.cpi_rgid = p->p_ucred->cr_rgid;
1448 		cpi.cpi_egid = p->p_ucred->cr_gid;
1449 		cpi.cpi_svgid = p->p_ucred->cr_svgid;
1450 
1451 		(void)strlcpy(cpi.cpi_name, pr->ps_comm, sizeof(cpi.cpi_name));
1452 
1453 		nhdr.namesz = sizeof("OpenBSD");
1454 		nhdr.descsz = sizeof(cpi);
1455 		nhdr.type = NT_OPENBSD_PROCINFO;
1456 
1457 		error = coredump_writenote_elf(p, iocookie, &nhdr,
1458 		    "OpenBSD", &cpi);
1459 		if (error)
1460 			return (error);
1461 	}
1462 	size += notesize;
1463 
1464 	/* Second, write an NT_OPENBSD_AUXV note. */
1465 	notesize = sizeof(nhdr) + elfround(sizeof("OpenBSD")) +
1466 	    elfround(ELF_AUX_WORDS * sizeof(char *));
1467 	if (iocookie && pr->ps_auxinfo) {
1468 
1469 		nhdr.namesz = sizeof("OpenBSD");
1470 		nhdr.descsz = ELF_AUX_WORDS * sizeof(char *);
1471 		nhdr.type = NT_OPENBSD_AUXV;
1472 
1473 		error = coredump_write(iocookie, UIO_SYSSPACE,
1474 		    &nhdr, sizeof(nhdr), 0);
1475 		if (error)
1476 			return (error);
1477 
1478 		error = coredump_write(iocookie, UIO_SYSSPACE,
1479 		    "OpenBSD", elfround(nhdr.namesz), 0);
1480 		if (error)
1481 			return (error);
1482 
1483 		error = coredump_write(iocookie, UIO_USERSPACE,
1484 		    (caddr_t)pr->ps_auxinfo, nhdr.descsz, 0);
1485 		if (error)
1486 			return (error);
1487 	}
1488 	size += notesize;
1489 
1490 #ifdef PT_WCOOKIE
1491 	notesize = sizeof(nhdr) + elfround(sizeof("OpenBSD")) +
1492 	    elfround(sizeof(register_t));
1493 	if (iocookie) {
1494 		register_t wcookie;
1495 
1496 		nhdr.namesz = sizeof("OpenBSD");
1497 		nhdr.descsz = sizeof(register_t);
1498 		nhdr.type = NT_OPENBSD_WCOOKIE;
1499 
1500 		wcookie = process_get_wcookie(p);
1501 		error = coredump_writenote_elf(p, iocookie, &nhdr,
1502 		    "OpenBSD", &wcookie);
1503 		if (error)
1504 			return (error);
1505 	}
1506 	size += notesize;
1507 #endif
1508 
1509 	/*
1510 	 * Now write the register info for the thread that caused the
1511 	 * coredump.
1512 	 */
1513 	error = coredump_note_elf(p, iocookie, &notesize);
1514 	if (error)
1515 		return (error);
1516 	size += notesize;
1517 
1518 	/*
1519 	 * Now, for each thread, write the register info and any other
1520 	 * per-thread notes.  Since we're dumping core, all the other
1521 	 * threads in the process have been stopped and the list can't
1522 	 * change.
1523 	 */
1524 	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
1525 		if (q == p)		/* we've taken care of this thread */
1526 			continue;
1527 		error = coredump_note_elf(q, iocookie, &notesize);
1528 		if (error)
1529 			return (error);
1530 		size += notesize;
1531 	}
1532 
1533 	*sizep = size;
1534 	return (0);
1535 }
1536 
1537 int
1538 coredump_note_elf(struct proc *p, void *iocookie, size_t *sizep)
1539 {
1540 	Elf_Note nhdr;
1541 	int size, notesize, error;
1542 	int namesize;
1543 	char name[64+ELFROUNDSIZE];
1544 	struct reg intreg;
1545 #ifdef PT_GETFPREGS
1546 	struct fpreg freg;
1547 #endif
1548 #ifdef PT_PACMASK
1549 	register_t pacmask[2];
1550 #endif
1551 
1552 	size = 0;
1553 
1554 	snprintf(name, sizeof(name)-ELFROUNDSIZE, "%s@%d",
1555 	    "OpenBSD", p->p_tid + THREAD_PID_OFFSET);
1556 	namesize = strlen(name) + 1;
1557 	memset(name + namesize, 0, elfround(namesize) - namesize);
1558 
1559 	notesize = sizeof(nhdr) + elfround(namesize) + elfround(sizeof(intreg));
1560 	if (iocookie) {
1561 		error = process_read_regs(p, &intreg);
1562 		if (error)
1563 			return (error);
1564 
1565 		nhdr.namesz = namesize;
1566 		nhdr.descsz = sizeof(intreg);
1567 		nhdr.type = NT_OPENBSD_REGS;
1568 
1569 		error = coredump_writenote_elf(p, iocookie, &nhdr,
1570 		    name, &intreg);
1571 		if (error)
1572 			return (error);
1573 
1574 	}
1575 	size += notesize;
1576 
1577 #ifdef PT_GETFPREGS
1578 	notesize = sizeof(nhdr) + elfround(namesize) + elfround(sizeof(freg));
1579 	if (iocookie) {
1580 		error = process_read_fpregs(p, &freg);
1581 		if (error)
1582 			return (error);
1583 
1584 		nhdr.namesz = namesize;
1585 		nhdr.descsz = sizeof(freg);
1586 		nhdr.type = NT_OPENBSD_FPREGS;
1587 
1588 		error = coredump_writenote_elf(p, iocookie, &nhdr, name, &freg);
1589 		if (error)
1590 			return (error);
1591 	}
1592 	size += notesize;
1593 #endif
1594 
1595 #ifdef PT_PACMASK
1596 	notesize = sizeof(nhdr) + elfround(namesize) +
1597 	    elfround(sizeof(pacmask));
1598 	if (iocookie) {
1599 		pacmask[0] = pacmask[1] = process_get_pacmask(p);
1600 
1601 		nhdr.namesz = namesize;
1602 		nhdr.descsz = sizeof(pacmask);
1603 		nhdr.type = NT_OPENBSD_PACMASK;
1604 
1605 		error = coredump_writenote_elf(p, iocookie, &nhdr,
1606 		    name, &pacmask);
1607 		if (error)
1608 			return (error);
1609 	}
1610 	size += notesize;
1611 #endif
1612 
1613 	*sizep = size;
1614 	/* XXX Add hook for machdep per-LWP notes. */
1615 	return (0);
1616 }
1617 
1618 int
1619 coredump_writenote_elf(struct proc *p, void *cookie, Elf_Note *nhdr,
1620     const char *name, void *data)
1621 {
1622 	int error;
1623 
1624 	error = coredump_write(cookie, UIO_SYSSPACE, nhdr, sizeof(*nhdr), 0);
1625 	if (error)
1626 		return error;
1627 
1628 	error = coredump_write(cookie, UIO_SYSSPACE, name,
1629 	    elfround(nhdr->namesz), 0);
1630 	if (error)
1631 		return error;
1632 
1633 	return coredump_write(cookie, UIO_SYSSPACE, data, nhdr->descsz, 0);
1634 }
1635 #endif /* !SMALL_KERNEL */
1636