/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2017 Dell EMC
 * Copyright (c) 2000-2001, 2003 David O'Brien
 * Copyright (c) 1995-1996 Søren Schmidt
 * Copyright (c) 1996 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_capsicum.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/compressor.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mman.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/racct.h>
#include <sys/reg.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/eventhandler.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>

#include <machine/elf.h>
#include <machine/md_var.h>

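/*
 * ELF note name and descriptor fields are padded to 4-byte
 * (ELF_NOTE_ROUNDSIZE) boundaries.  OLD_EI_BRAND is the e_ident[]
 * offset at which FreeBSD 3.x-era binaries carried their brand string.
 */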
#define ELF_NOTE_ROUNDSIZE 4
#define OLD_EI_BRAND 8

static int __elfN(check_header)(const Elf_Ehdr *hdr);
static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp,
    const char *interp, int32_t *osrel, uint32_t *fctl0);
static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry);
static int __elfN(load_section)(const struct image_params *imgp,
    vm_ooffset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
    vm_prot_t prot);
static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
static bool __elfN(freebsd_trans_osrel)(const Elf_Note *note,
    int32_t *osrel);
static bool kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel);
static bool __elfN(check_note)(struct image_params *imgp,
    Elf_Brandnote *checknote, int32_t *osrel, bool *has_fctl0,
    uint32_t *fctl0);
static vm_prot_t __elfN(trans_prot)(Elf_Word);
static Elf_Word __elfN(untrans_prot)(vm_prot_t);
static size_t __elfN(prepare_register_notes)(struct thread *td,
    struct note_info_list *list, struct thread *target_td);

SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE),
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "");

int __elfN(fallback_brand) = -1;
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
    fallback_brand, CTLFLAG_RWTUN, &__elfN(fallback_brand), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");

static int elf_legacy_coredump = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
    &elf_legacy_coredump, 0,
    "include all and only RW pages in core dumps");

int __elfN(nxstack) =
#if defined(__amd64__) || defined(__powerpc64__) /* both 64 and 32 bit */ || \
    (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__) || \
    defined(__riscv)
	1;
#else
	0;
#endif
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
    nxstack, CTLFLAG_RW, &__elfN(nxstack), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable non-executable stack");

#if defined(__amd64__)
static int __elfN(vdso) = 1;
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
    vdso, CTLFLAG_RWTUN, &__elfN(vdso), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable vdso preloading");
#else
static int __elfN(vdso) = 0;
#endif

#if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__))
int i386_read_exec = 0;
SYSCTL_INT(_kern_elf32, OID_AUTO, read_exec, CTLFLAG_RW, &i386_read_exec, 0,
    "enable execution from readable segments");
#endif

static u_long __elfN(pie_base) = ET_DYN_LOAD_ADDR;
static int
sysctl_pie_base(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = __elfN(pie_base);
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if ((val & PAGE_MASK) != 0)
		return (EINVAL);
	__elfN(pie_base) = val;
	return (0);
}
SYSCTL_PROC(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, pie_base,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0,
    sysctl_pie_base, "LU",
    "PIE load base without randomization");

SYSCTL_NODE(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, aslr,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "");
#define ASLR_NODE_OID __CONCAT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), _aslr)

/*
 * Enable ASLR by default for 64-bit non-PIE binaries.  32-bit architectures
 * have limited address space (which can cause issues for applications with
 * high memory use) so we leave it off there.
 */
static int __elfN(aslr_enabled) = __ELF_WORD_SIZE == 64;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, enable, CTLFLAG_RWTUN,
    &__elfN(aslr_enabled), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
    ": enable address map randomization");

/*
 * Enable ASLR by default for 64-bit PIE binaries.
 */
static int __elfN(pie_aslr_enabled) = __ELF_WORD_SIZE == 64;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, pie_enable, CTLFLAG_RWTUN,
    &__elfN(pie_aslr_enabled), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
    ": enable address map randomization for PIE binaries");

/*
 * Sbrk is deprecated and it can be assumed that in most cases it will not be
 * used anyway. This setting is valid only with ASLR enabled, and allows ASLR
 * to use the bss grow region.
 */
static int __elfN(aslr_honor_sbrk) = 0;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, honor_sbrk, CTLFLAG_RW,
    &__elfN(aslr_honor_sbrk), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": assume sbrk is used");

static int __elfN(aslr_stack) = __ELF_WORD_SIZE == 64;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, stack, CTLFLAG_RWTUN,
    &__elfN(aslr_stack), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
    ": enable stack address randomization");

static int __elfN(aslr_shared_page) = __ELF_WORD_SIZE == 64;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, shared_page, CTLFLAG_RWTUN,
    &__elfN(aslr_shared_page), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
    ": enable shared page address randomization");

static int __elfN(sigfastblock) = 1;
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, sigfastblock,
    CTLFLAG_RWTUN, &__elfN(sigfastblock), 0,
    "enable sigfastblock for new processes");

static bool __elfN(allow_wx) = true;
SYSCTL_BOOL(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, allow_wx,
    CTLFLAG_RWTUN, &__elfN(allow_wx), 0,
    "Allow pages to be mapped simultaneously writable and executable");

static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];

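/* aligned(a, t): true when "a" is aligned to the size of type "t". */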
#define aligned(a, t) (rounddown2((u_long)(a), sizeof(t)) == (u_long)(a))

Elf_Brandnote __elfN(freebsd_brandnote) = {
	.hdr.n_namesz = sizeof(FREEBSD_ABI_VENDOR),
	.hdr.n_descsz = sizeof(int32_t),
	.hdr.n_type = NT_FREEBSD_ABI_TAG,
	.vendor = FREEBSD_ABI_VENDOR,
	.flags = BN_TRANSLATE_OSREL,
	.trans_osrel = __elfN(freebsd_trans_osrel)
};

static bool
__elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel)
{
	uintptr_t p;

	p = (uintptr_t)(note + 1);
	p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);
	*osrel = *(const int32_t *)(p);

	return (true);
}
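
/*
 * Layout of the NT_FREEBSD_ABI_TAG note consumed by
 * __elfN(freebsd_trans_osrel)() above (illustrative):
 *
 *	n_namesz = 8			"FreeBSD\0"
 *	n_descsz = 4			int32_t __FreeBSD_version, e.g. 1400097
 *	n_type   = NT_FREEBSD_ABI_TAG
 *
 * The name field is padded to an ELF_NOTE_ROUNDSIZE (4-byte) boundary,
 * so the descriptor holding the osrel value follows it directly.
 */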

static int GNU_KFREEBSD_ABI_DESC = 3;

Elf_Brandnote __elfN(kfreebsd_brandnote) = {
	.hdr.n_namesz = sizeof(GNU_ABI_VENDOR),
	.hdr.n_descsz = 16,	/* XXX at least 16 */
	.hdr.n_type = 1,
	.vendor = GNU_ABI_VENDOR,
	.flags = BN_TRANSLATE_OSREL,
	.trans_osrel = kfreebsd_trans_osrel
};

static bool
kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel)
{
	const Elf32_Word *desc;
	uintptr_t p;

	p = (uintptr_t)(note + 1);
	p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);

	desc = (const Elf32_Word *)p;
	if (desc[0] != GNU_KFREEBSD_ABI_DESC)
		return (false);

	/*
	 * Debian GNU/kFreeBSD embeds the earliest compatible kernel version
	 * (__FreeBSD_version: <major><two digit minor>Rxx) in little-endian
	 * (LSB) order.
	 */
	*osrel = desc[1] * 100000 + desc[2] * 1000 + desc[3];

	return (true);
}
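
/*
 * Example for kfreebsd_trans_osrel() above (illustrative): desc words
 * { 3, 9, 0, 0 } identify a GNU/kFreeBSD binary built for kernel 9.0,
 * giving *osrel = 9 * 100000 + 0 * 1000 + 0 = 900000.
 */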

int
__elfN(insert_brand_entry)(Elf_Brandinfo *entry)
{
	int i;

	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == NULL) {
			elf_brand_list[i] = entry;
			break;
		}
	}
	if (i == MAX_BRANDS) {
		printf("WARNING: %s: could not insert brandinfo entry: %p\n",
		    __func__, entry);
		return (-1);
	}
	return (0);
}

int
__elfN(remove_brand_entry)(Elf_Brandinfo *entry)
{
	int i;

	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == entry) {
			elf_brand_list[i] = NULL;
			break;
		}
	}
	if (i == MAX_BRANDS)
		return (-1);
	return (0);
}

bool
__elfN(brand_inuse)(Elf_Brandinfo *entry)
{
	struct proc *p;
	bool rval = false;

	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		if (p->p_sysent == entry->sysvec) {
			rval = true;
			break;
		}
	}
	sx_sunlock(&allproc_lock);

	return (rval);
}

static Elf_Brandinfo *
__elfN(get_brandinfo)(struct image_params *imgp, const char *interp,
    int32_t *osrel, uint32_t *fctl0)
{
	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
	Elf_Brandinfo *bi, *bi_m;
	bool ret, has_fctl0;
	int i, interp_name_len;

	interp_name_len = interp != NULL ? strlen(interp) + 1 : 0;

	/*
	 * We support four types of branding -- (1) the ELF EI_OSABI field
	 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
	 * branding within the ELF header, (3) the path of the `interp_path'
	 * field, and (4) the ".note.ABI-tag" ELF section.
	 */

	/* Look for a ".note.ABI-tag" ELF section */
	bi_m = NULL;
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi == NULL)
			continue;
		if (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0)
			continue;
		if (hdr->e_machine == bi->machine && (bi->flags &
		    (BI_BRAND_NOTE|BI_BRAND_NOTE_MANDATORY)) != 0) {
			has_fctl0 = false;
			*fctl0 = 0;
			*osrel = 0;
			ret = __elfN(check_note)(imgp, bi->brand_note, osrel,
			    &has_fctl0, fctl0);
			/* Give brand a chance to veto check_note's guess */
			if (ret && bi->header_supported) {
				ret = bi->header_supported(imgp, osrel,
				    has_fctl0 ? fctl0 : NULL);
			}
			/*
			 * If the note checker claimed the binary, but the
			 * interpreter path in the image does not match the
			 * default one for the brand, try to search for
			 * other brands with the same interpreter.  Either
			 * there is a better brand with the right interpreter,
			 * or, failing this, we return the first brand which
			 * accepted our note and, optionally, header.
			 */
			if (ret && bi_m == NULL && interp != NULL &&
			    (bi->interp_path == NULL ||
			    (strlen(bi->interp_path) + 1 != interp_name_len ||
			    strncmp(interp, bi->interp_path, interp_name_len)
			    != 0))) {
				bi_m = bi;
				ret = 0;
			}
			if (ret)
				return (bi);
		}
	}
	if (bi_m != NULL)
		return (bi_m);

	/* If the executable has a brand, search for it in the brand list. */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi == NULL || (bi->flags & BI_BRAND_NOTE_MANDATORY) != 0 ||
		    (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0))
			continue;
		if (hdr->e_machine == bi->machine &&
		    (hdr->e_ident[EI_OSABI] == bi->brand ||
		    (bi->compat_3_brand != NULL &&
		    strcmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
		    bi->compat_3_brand) == 0))) {
			/* Looks good, but give brand a chance to veto */
			if (bi->header_supported == NULL ||
			    bi->header_supported(imgp, NULL, NULL)) {
				/*
				 * Again, prefer strictly matching
				 * interpreter path.
				 */
				if (interp_name_len == 0 &&
				    bi->interp_path == NULL)
					return (bi);
				if (bi->interp_path != NULL &&
				    strlen(bi->interp_path) + 1 ==
				    interp_name_len && strncmp(interp,
				    bi->interp_path, interp_name_len) == 0)
					return (bi);
				if (bi_m == NULL)
					bi_m = bi;
			}
		}
	}
	if (bi_m != NULL)
		return (bi_m);

	/* No known brand, see if the header is recognized by any brand */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY ||
		    bi->header_supported == NULL)
			continue;
		if (hdr->e_machine == bi->machine) {
			ret = bi->header_supported(imgp, NULL, NULL);
			if (ret)
				return (bi);
		}
	}

	/* Lacking a known brand, search for a recognized interpreter. */
	if (interp != NULL) {
		for (i = 0; i < MAX_BRANDS; i++) {
			bi = elf_brand_list[i];
			if (bi == NULL || (bi->flags &
			    (BI_BRAND_NOTE_MANDATORY | BI_BRAND_ONLY_STATIC))
			    != 0)
				continue;
			if (hdr->e_machine == bi->machine &&
			    bi->interp_path != NULL &&
			    /* ELF image p_filesz includes terminating zero */
			    strlen(bi->interp_path) + 1 == interp_name_len &&
			    strncmp(interp, bi->interp_path, interp_name_len)
			    == 0 && (bi->header_supported == NULL ||
			    bi->header_supported(imgp, NULL, NULL)))
				return (bi);
		}
	}

	/* Lacking a recognized interpreter, try the default brand */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi == NULL || (bi->flags & BI_BRAND_NOTE_MANDATORY) != 0 ||
		    (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0))
			continue;
		if (hdr->e_machine == bi->machine &&
		    __elfN(fallback_brand) == bi->brand &&
		    (bi->header_supported == NULL ||
		    bi->header_supported(imgp, NULL, NULL)))
			return (bi);
	}
	return (NULL);
}

static bool
__elfN(phdr_in_zero_page)(const Elf_Ehdr *hdr)
{
	return (hdr->e_phoff <= PAGE_SIZE &&
	    (u_int)hdr->e_phentsize * hdr->e_phnum <= PAGE_SIZE - hdr->e_phoff);
}
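
/*
 * Note on __elfN(phdr_in_zero_page)() above: the e_phoff <= PAGE_SIZE
 * test guards the subtraction against underflow, and the u_int cast
 * keeps the product of the 16-bit e_phentsize and e_phnum fields from
 * overflowing the signed int they would otherwise be promoted to.
 */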

static int
__elfN(check_header)(const Elf_Ehdr *hdr)
{
	Elf_Brandinfo *bi;
	int i;

	if (!IS_ELF(*hdr) ||
	    hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
	    hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
	    hdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    hdr->e_phentsize != sizeof(Elf_Phdr) ||
	    hdr->e_version != ELF_TARG_VER)
		return (ENOEXEC);

	/*
	 * Make sure we have at least one brand for this machine.
	 */

	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && bi->machine == hdr->e_machine)
			break;
	}
	if (i == MAX_BRANDS)
		return (ENOEXEC);

	return (0);
}

static int
__elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot)
{
	struct sf_buf *sf;
	int error;
	vm_offset_t off;

	/*
	 * Create the page if it doesn't exist yet. Ignore errors.
	 */
	vm_map_fixed(map, NULL, 0, trunc_page(start), round_page(end) -
	    trunc_page(start), VM_PROT_ALL, VM_PROT_ALL, MAP_CHECK_EXCL);

	/*
	 * Find the page from the underlying object.
	 */
	if (object != NULL) {
		sf = vm_imgact_map_page(object, offset);
		if (sf == NULL)
			return (KERN_FAILURE);
		off = offset - trunc_page(offset);
		error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start,
		    end - start);
		vm_imgact_unmap_page(sf);
		if (error != 0)
			return (KERN_FAILURE);
	}

	return (KERN_SUCCESS);
}

static int
__elfN(map_insert)(const struct image_params *imgp, vm_map_t map,
    vm_object_t object, vm_ooffset_t offset, vm_offset_t start, vm_offset_t end,
    vm_prot_t prot, int cow)
{
	struct sf_buf *sf;
	vm_offset_t off;
	vm_size_t sz;
	int error, locked, rv;

	if (start != trunc_page(start)) {
		rv = __elfN(map_partial)(map, object, offset, start,
		    round_page(start), prot);
		if (rv != KERN_SUCCESS)
			return (rv);
		offset += round_page(start) - start;
		start = round_page(start);
	}
	if (end != round_page(end)) {
		rv = __elfN(map_partial)(map, object, offset +
		    trunc_page(end) - start, trunc_page(end), end, prot);
		if (rv != KERN_SUCCESS)
			return (rv);
		end = trunc_page(end);
	}
	if (start >= end)
		return (KERN_SUCCESS);
	if ((offset & PAGE_MASK) != 0) {
		/*
		 * The mapping is not page aligned.  This means that we have
		 * to copy the data.
		 */
		rv = vm_map_fixed(map, NULL, 0, start, end - start,
		    prot | VM_PROT_WRITE, VM_PROT_ALL, MAP_CHECK_EXCL);
		if (rv != KERN_SUCCESS)
			return (rv);
		if (object == NULL)
			return (KERN_SUCCESS);
		for (; start < end; start += sz) {
			sf = vm_imgact_map_page(object, offset);
			if (sf == NULL)
				return (KERN_FAILURE);
			off = offset - trunc_page(offset);
			sz = end - start;
			if (sz > PAGE_SIZE - off)
				sz = PAGE_SIZE - off;
			error = copyout((caddr_t)sf_buf_kva(sf) + off,
			    (caddr_t)start, sz);
			vm_imgact_unmap_page(sf);
			if (error != 0)
				return (KERN_FAILURE);
			offset += sz;
		}
	} else {
		vm_object_reference(object);
		rv = vm_map_fixed(map, object, offset, start, end - start,
		    prot, VM_PROT_ALL, cow | MAP_CHECK_EXCL |
		    (object != NULL ? MAP_VN_EXEC : 0));
		if (rv != KERN_SUCCESS) {
			locked = VOP_ISLOCKED(imgp->vp);
			VOP_UNLOCK(imgp->vp);
			vm_object_deallocate(object);
			vn_lock(imgp->vp, locked | LK_RETRY);
			return (rv);
		} else if (object != NULL) {
			MPASS(imgp->vp->v_object == object);
			VOP_SET_TEXT_CHECKED(imgp->vp);
		}
	}
	return (KERN_SUCCESS);
}

static int
__elfN(load_section)(const struct image_params *imgp, vm_ooffset_t offset,
    caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot)
{
	struct sf_buf *sf;
	size_t map_len;
	vm_map_t map;
	vm_object_t object;
	vm_offset_t map_addr;
	int error, rv, cow;
	size_t copy_len;
	vm_ooffset_t file_addr;

	/*
	 * It's necessary to fail if the filsz + offset taken from the
	 * header is greater than the actual file pager object's size.
	 * If we were to allow this, then the vm_map_find() below would
	 * walk right off the end of the file object and into the ether.
	 *
	 * While I'm here, might as well check for something else that
	 * is invalid: filsz cannot be greater than memsz.
	 */
	if ((filsz != 0 && (off_t)filsz + offset > imgp->attr->va_size) ||
	    filsz > memsz) {
		uprintf("elf_load_section: truncated ELF file\n");
		return (ENOEXEC);
	}

	object = imgp->object;
	map = &imgp->proc->p_vmspace->vm_map;
	map_addr = trunc_page((vm_offset_t)vmaddr);
	file_addr = trunc_page(offset);

	/*
	 * We have two choices.  We can either clear the data in the last page
	 * of an oversized mapping, or we can start the anon mapping a page
	 * early and copy the initialized data into that first page.  We
	 * choose the second.
	 */
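	/*
	 * Illustrative layout for filsz < memsz (bss present):
	 *
	 *	vmaddr              vmaddr+filsz             vmaddr+memsz
	 *	  |-- file-backed --|------ zero-filled bss ------|
	 *
	 * The partial page at vmaddr+filsz is not mapped from the file;
	 * it becomes the first page of the anon mapping instead, and the
	 * trailing file fragment is copied into it below.
	 */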
	if (filsz == 0)
		map_len = 0;
	else if (memsz > filsz)
		map_len = trunc_page(offset + filsz) - file_addr;
	else
		map_len = round_page(offset + filsz) - file_addr;

	if (map_len != 0) {
		/* cow flags: don't dump readonly sections in core */
		cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
		    (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);

		rv = __elfN(map_insert)(imgp, map, object, file_addr,
		    map_addr, map_addr + map_len, prot, cow);
		if (rv != KERN_SUCCESS)
			return (EINVAL);

		/* we can stop now if we've covered it all */
		if (memsz == filsz)
			return (0);
	}

	/*
	 * We have to get the remaining bit of the file into the first part
	 * of the oversized map segment.  This is normally because the .data
	 * segment in the file is extended to provide bss.  It's a neat idea
	 * to try and save a page, but it's a pain in the behind to implement.
	 */
	copy_len = filsz == 0 ? 0 : (offset + filsz) - trunc_page(offset +
	    filsz);
	map_addr = trunc_page((vm_offset_t)vmaddr + filsz);
	map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr;

	/* This had damn well better be true! */
	if (map_len != 0) {
		rv = __elfN(map_insert)(imgp, map, NULL, 0, map_addr,
		    map_addr + map_len, prot, 0);
		if (rv != KERN_SUCCESS)
			return (EINVAL);
	}

	if (copy_len != 0) {
		sf = vm_imgact_map_page(object, offset + filsz);
		if (sf == NULL)
			return (EIO);

		/* send the page fragment to user space */
		error = copyout((caddr_t)sf_buf_kva(sf), (caddr_t)map_addr,
		    copy_len);
		vm_imgact_unmap_page(sf);
		if (error != 0)
			return (error);
	}

	/*
	 * Remove write access to the page if it was only granted by map_insert
	 * to allow copyout.
	 */
	if ((prot & VM_PROT_WRITE) == 0)
		vm_map_protect(map, trunc_page(map_addr), round_page(map_addr +
		    map_len), prot, 0, VM_MAP_PROTECT_SET_PROT);

	return (0);
}

static int
__elfN(load_sections)(const struct image_params *imgp, const Elf_Ehdr *hdr,
    const Elf_Phdr *phdr, u_long rbase, u_long *base_addrp)
{
	vm_prot_t prot;
	u_long base_addr;
	bool first;
	int error, i;

	ASSERT_VOP_LOCKED(imgp->vp, __func__);

	base_addr = 0;
	first = true;

	for (i = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0)
			continue;

		/* Loadable segment */
		prot = __elfN(trans_prot)(phdr[i].p_flags);
		error = __elfN(load_section)(imgp, phdr[i].p_offset,
		    (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
		    phdr[i].p_memsz, phdr[i].p_filesz, prot);
		if (error != 0)
			return (error);

		/*
		 * Establish the base address if this is the first segment.
		 */
		if (first) {
			base_addr = trunc_page(phdr[i].p_vaddr + rbase);
			first = false;
		}
	}

	if (base_addrp != NULL)
		*base_addrp = base_addr;

	return (0);
}

/*
 * Load the file "file" into memory.  It may be either a shared object
 * or an executable.
 *
 * The "addr" reference parameter is in/out.  On entry, it specifies
 * the address where a shared object should be loaded.  If the file is
 * an executable, this value is ignored.  On exit, "addr" specifies
 * where the file was actually loaded.
 *
 * The "entry" reference parameter is out only.  On exit, it specifies
 * the entry point for the loaded file.
 */
static int
__elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry)
{
	struct {
		struct nameidata nd;
		struct vattr attr;
		struct image_params image_params;
	} *tempdata;
	const Elf_Ehdr *hdr = NULL;
	const Elf_Phdr *phdr = NULL;
	struct nameidata *nd;
	struct vattr *attr;
	struct image_params *imgp;
	u_long rbase;
	u_long base_addr = 0;
	int error;

#ifdef CAPABILITY_MODE
	/*
	 * XXXJA: This check can go away once we are sufficiently confident
	 * that the checks in namei() are correct.
	 */
	if (IN_CAPABILITY_MODE(curthread))
		return (ECAPMODE);
#endif

	tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK | M_ZERO);
	nd = &tempdata->nd;
	attr = &tempdata->attr;
	imgp = &tempdata->image_params;

	/*
	 * Initialize part of the common data
	 */
	imgp->proc = p;
	imgp->attr = attr;

	NDINIT(nd, LOOKUP, ISOPEN | FOLLOW | LOCKSHARED | LOCKLEAF,
	    UIO_SYSSPACE, file);
	if ((error = namei(nd)) != 0) {
		nd->ni_vp = NULL;
		goto fail;
	}
	NDFREE_PNBUF(nd);
	imgp->vp = nd->ni_vp;

	/*
	 * Check permissions, modes, uid, etc on the file, and "open" it.
	 */
	error = exec_check_permissions(imgp);
	if (error)
		goto fail;

	error = exec_map_first_page(imgp);
	if (error)
		goto fail;

	imgp->object = nd->ni_vp->v_object;

	hdr = (const Elf_Ehdr *)imgp->image_header;
	if ((error = __elfN(check_header)(hdr)) != 0)
		goto fail;
	if (hdr->e_type == ET_DYN)
		rbase = *addr;
	else if (hdr->e_type == ET_EXEC)
		rbase = 0;
	else {
		error = ENOEXEC;
		goto fail;
	}

	/* Only support headers that fit within first page for now */
	if (!__elfN(phdr_in_zero_page)(hdr)) {
		error = ENOEXEC;
		goto fail;
	}

	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
	if (!aligned(phdr, Elf_Addr)) {
		error = ENOEXEC;
		goto fail;
	}

	error = __elfN(load_sections)(imgp, hdr, phdr, rbase, &base_addr);
	if (error != 0)
		goto fail;

	if (p->p_sysent->sv_protect != NULL)
		p->p_sysent->sv_protect(imgp, SVP_INTERP);

	*addr = base_addr;
	*entry = (unsigned long)hdr->e_entry + rbase;

fail:
	if (imgp->firstpage)
		exec_unmap_first_page(imgp);

	if (nd->ni_vp) {
		if (imgp->textset)
			VOP_UNSET_TEXT_CHECKED(nd->ni_vp);
		vput(nd->ni_vp);
	}
	free(tempdata, M_TEMP);

	return (error);
}

/*
 * Select a randomized valid address in the map "map", between minv and
 * maxv, with the specified alignment.  The [minv, maxv) range must
 * belong to the map.  Note that the function only allocates the
 * address; it is up to the caller to clamp maxv so that the final
 * allocation length fits into the map.
 *
 * The result is returned in *resp; an error code indicates that the
 * arguments did not pass sanity checks for overflow and range
 * correctness.
 */
static int
__CONCAT(rnd_, __elfN(base))(vm_map_t map, u_long minv, u_long maxv,
    u_int align, u_long *resp)
{
	u_long rbase, res;

	MPASS(vm_map_min(map) <= minv);

	if (minv >= maxv || minv + align >= maxv || maxv > vm_map_max(map)) {
		uprintf("Invalid ELF segments layout\n");
		return (ENOEXEC);
	}

	arc4rand(&rbase, sizeof(rbase), 0);
	res = roundup(minv, (u_long)align) + rbase % (maxv - minv);
	res &= ~((u_long)align - 1);
	if (res >= maxv)
		res -= align;

	KASSERT(res >= minv,
	    ("res %#lx < minv %#lx, maxv %#lx rbase %#lx",
	    res, minv, maxv, rbase));
	KASSERT(res < maxv,
	    ("res %#lx > maxv %#lx, minv %#lx rbase %#lx",
	    res, maxv, minv, rbase));

	*resp = res;
	return (0);
}
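
/*
 * Worked example for the randomization above (illustrative numbers):
 * with minv = 0x10000, maxv = 0x50000 and align = 0x1000, a random
 * rbase of 0x12345 gives res = 0x10000 + (0x12345 % 0x40000) = 0x22345,
 * which is masked down to the 0x1000 boundary 0x22000.  The final
 * "res >= maxv" test steps back one alignment unit when the roundup
 * plus the random offset lands past maxv.
 */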

static int
__elfN(enforce_limits)(struct image_params *imgp, const Elf_Ehdr *hdr,
    const Elf_Phdr *phdr)
{
	struct vmspace *vmspace;
	const char *err_str;
	u_long text_size, data_size, total_size, text_addr, data_addr;
	u_long seg_size, seg_addr;
	int i;

	err_str = NULL;
	text_size = data_size = total_size = text_addr = data_addr = 0;

	for (i = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0)
			continue;

		seg_addr = trunc_page(phdr[i].p_vaddr + imgp->et_dyn_addr);
		seg_size = round_page(phdr[i].p_memsz +
		    phdr[i].p_vaddr + imgp->et_dyn_addr - seg_addr);

		/*
		 * Make the largest executable segment the official
		 * text segment and all others data.
		 *
		 * Note that obreak() assumes that data_addr + data_size == end
		 * of data load area, and the ELF file format expects segments
		 * to be sorted by address.  If multiple data segments exist,
		 * the last one will be used.
		 */

		if ((phdr[i].p_flags & PF_X) != 0 && text_size < seg_size) {
			text_size = seg_size;
			text_addr = seg_addr;
		} else {
			data_size = seg_size;
			data_addr = seg_addr;
		}
		total_size += seg_size;
	}

	if (data_addr == 0 && data_size == 0) {
		data_addr = text_addr;
		data_size = text_size;
	}

	/*
	 * Check limits.  It should be safe to check the
	 * limits after loading the segments, since we do
	 * not actually fault in all of the segments' pages.
	 */
	PROC_LOCK(imgp->proc);
	if (data_size > lim_cur_proc(imgp->proc, RLIMIT_DATA))
		err_str = "Data segment size exceeds process limit";
	else if (text_size > maxtsiz)
		err_str = "Text segment size exceeds system limit";
	else if (total_size > lim_cur_proc(imgp->proc, RLIMIT_VMEM))
		err_str = "Total segment size exceeds process limit";
	else if (racct_set(imgp->proc, RACCT_DATA, data_size) != 0)
		err_str = "Data segment size exceeds resource limit";
	else if (racct_set(imgp->proc, RACCT_VMEM, total_size) != 0)
		err_str = "Total segment size exceeds resource limit";
	PROC_UNLOCK(imgp->proc);
	if (err_str != NULL) {
		uprintf("%s\n", err_str);
		return (ENOMEM);
	}

	vmspace = imgp->proc->p_vmspace;
	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
	vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;

	return (0);
}

static int
__elfN(get_interp)(struct image_params *imgp, const Elf_Phdr *phdr,
    char **interpp, bool *free_interpp)
{
	struct thread *td;
	char *interp;
	int error, interp_name_len;

	KASSERT(phdr->p_type == PT_INTERP,
	    ("%s: p_type %u != PT_INTERP", __func__, phdr->p_type));
	ASSERT_VOP_LOCKED(imgp->vp, __func__);

	td = curthread;

	/* Path to interpreter */
	if (phdr->p_filesz < 2 || phdr->p_filesz > MAXPATHLEN) {
		uprintf("Invalid PT_INTERP\n");
		return (ENOEXEC);
	}

	interp_name_len = phdr->p_filesz;
	if (phdr->p_offset > PAGE_SIZE ||
	    interp_name_len > PAGE_SIZE - phdr->p_offset) {
		/*
		 * The vnode lock might be needed by the pagedaemon to
		 * clean pages owned by the vnode.  Do not allow sleep
		 * waiting for memory with the vnode locked, instead
		 * try non-sleepable allocation first, and if it
		 * fails, go to the slow path where we drop the lock
		 * and do M_WAITOK.  A text reference prevents
		 * modifications to the vnode content.
		 */
		interp = malloc(interp_name_len + 1, M_TEMP, M_NOWAIT);
		if (interp == NULL) {
			VOP_UNLOCK(imgp->vp);
			interp = malloc(interp_name_len + 1, M_TEMP, M_WAITOK);
			vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		}

		error = vn_rdwr(UIO_READ, imgp->vp, interp,
		    interp_name_len, phdr->p_offset,
		    UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred,
		    NOCRED, NULL, td);
		if (error != 0) {
			free(interp, M_TEMP);
			uprintf("i/o error PT_INTERP %d\n", error);
			return (error);
		}
		interp[interp_name_len] = '\0';

		*interpp = interp;
		*free_interpp = true;
		return (0);
	}

	interp = __DECONST(char *, imgp->image_header) + phdr->p_offset;
	if (interp[interp_name_len - 1] != '\0') {
		uprintf("Invalid PT_INTERP\n");
		return (ENOEXEC);
	}

	*interpp = interp;
	*free_interpp = false;
	return (0);
}

static int
__elfN(load_interp)(struct image_params *imgp, const Elf_Brandinfo *brand_info,
    const char *interp, u_long *addr, u_long *entry)
{
	int error;

	if (brand_info->interp_newpath != NULL &&
	    (brand_info->interp_path == NULL ||
	    strcmp(interp, brand_info->interp_path) == 0)) {
		error = __elfN(load_file)(imgp->proc,
		    brand_info->interp_newpath, addr, entry);
		if (error == 0)
			return (0);
	}

	error = __elfN(load_file)(imgp->proc, interp, addr, entry);
	if (error == 0)
		return (0);

	uprintf("ELF interpreter %s not found, error %d\n", interp, error);
	return (error);
}

/*
 * Impossible et_dyn_addr initial value indicating that the real base
 * must be calculated later with some randomization applied.
 */
#define ET_DYN_ADDR_RAND 1

static int
__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
{
	struct thread *td;
	const Elf_Ehdr *hdr;
	const Elf_Phdr *phdr;
	Elf_Auxargs *elf_auxargs;
	struct vmspace *vmspace;
	vm_map_t map;
	char *interp;
	Elf_Brandinfo *brand_info;
	struct sysentvec *sv;
	u_long addr, baddr, entry, proghdr;
	u_long maxalign, maxsalign, mapsz, maxv, maxv1, anon_loc;
	uint32_t fctl0;
	int32_t osrel;
	bool free_interp;
	int error, i, n;

	hdr = (const Elf_Ehdr *)imgp->image_header;

	/*
	 * Do we have a valid ELF header?
	 *
	 * Only allow ET_EXEC & ET_DYN here; reject ET_DYN later
	 * if the particular brand doesn't support it.
	 */
	if (__elfN(check_header)(hdr) != 0 ||
	    (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN))
		return (-1);

	/*
	 * From here on down, we return an errno, not -1, as we've
	 * detected an ELF file.
	 */

	if (!__elfN(phdr_in_zero_page)(hdr)) {
		uprintf("Program headers not in the first page\n");
		return (ENOEXEC);
	}
	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
	if (!aligned(phdr, Elf_Addr)) {
		uprintf("Unaligned program headers\n");
		return (ENOEXEC);
	}

	n = error = 0;
	baddr = 0;
	osrel = 0;
	fctl0 = 0;
	entry = proghdr = 0;
	interp = NULL;
	free_interp = false;
	td = curthread;

	/*
	 * Somewhat arbitrarily, limit the accepted max alignment for the
	 * loadable segment to the max supported superpage size.  Overly
	 * large alignment requests are not useful and are indicators of a
	 * corrupted or outright malicious binary.
	 */
	maxalign = PAGE_SIZE;
	maxsalign = PAGE_SIZE * 1024;
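	/* Raise the cap to the largest configured superpage size. */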
	for (i = MAXPAGESIZES - 1; i > 0; i--) {
		if (pagesizes[i] > maxsalign)
			maxsalign = pagesizes[i];
	}

	mapsz = 0;

	for (i = 0; i < hdr->e_phnum; i++) {
		switch (phdr[i].p_type) {
		case PT_LOAD:
			if (n == 0)
				baddr = phdr[i].p_vaddr;
			if (!powerof2(phdr[i].p_align) ||
			    phdr[i].p_align > maxsalign) {
				uprintf("Invalid segment alignment\n");
				error = ENOEXEC;
				goto ret;
			}
			if (phdr[i].p_align > maxalign)
				maxalign = phdr[i].p_align;
			if (mapsz + phdr[i].p_memsz < mapsz) {
				uprintf("Mapsize overflow\n");
				error = ENOEXEC;
				goto ret;
			}
			mapsz += phdr[i].p_memsz;
			n++;

			/*
			 * If this segment contains the program headers,
			 * remember their virtual address for the AT_PHDR
			 * aux entry.  Static binaries don't usually include
			 * a PT_PHDR entry.
			 */
			if (phdr[i].p_offset == 0 &&
			    hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize <=
			    phdr[i].p_filesz)
				proghdr = phdr[i].p_vaddr + hdr->e_phoff;
			break;
		case PT_INTERP:
			/* Path to interpreter */
			if (interp != NULL) {
				uprintf("Multiple PT_INTERP headers\n");
				error = ENOEXEC;
				goto ret;
			}
			error = __elfN(get_interp)(imgp, &phdr[i], &interp,
			    &free_interp);
			if (error != 0)
				goto ret;
			break;
		case PT_GNU_STACK:
			if (__elfN(nxstack)) {
				imgp->stack_prot =
				    __elfN(trans_prot)(phdr[i].p_flags);
				if ((imgp->stack_prot & VM_PROT_RW) !=
				    VM_PROT_RW) {
					uprintf("Invalid PT_GNU_STACK\n");
					error = ENOEXEC;
					goto ret;
				}
			}
			imgp->stack_sz = phdr[i].p_memsz;
			break;
		case PT_PHDR: /* Program header table info */
			proghdr = phdr[i].p_vaddr;
			break;
		}
	}

	brand_info = __elfN(get_brandinfo)(imgp, interp, &osrel, &fctl0);
	if (brand_info == NULL) {
		uprintf("ELF binary type \"%u\" not known.\n",
		    hdr->e_ident[EI_OSABI]);
		error = ENOEXEC;
		goto ret;
	}
	sv = brand_info->sysvec;
	if (hdr->e_type == ET_DYN) {
		if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0) {
			uprintf("Cannot execute shared object\n");
			error = ENOEXEC;
			goto ret;
		}
		/*
		 * Honour the base load address from the dso if it is
		 * non-zero for some reason.
		 */
		if (baddr == 0) {
			if ((sv->sv_flags & SV_ASLR) == 0 ||
			    (fctl0 & NT_FREEBSD_FCTL_ASLR_DISABLE) != 0)
				imgp->et_dyn_addr = __elfN(pie_base);
			else if ((__elfN(pie_aslr_enabled) &&
			    (imgp->proc->p_flag2 & P2_ASLR_DISABLE) == 0) ||
			    (imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0)
				imgp->et_dyn_addr = ET_DYN_ADDR_RAND;
			else
				imgp->et_dyn_addr = __elfN(pie_base);
		}
	}

	/*
	 * Avoid a possible deadlock if the current address space is destroyed
	 * and that address space maps the locked vnode.  In the common case,
	 * the locked vnode's v_usecount is decremented but remains greater
	 * than zero.  Consequently, the vnode lock is not needed by vrele().
	 * However, in cases where the vnode lock is external, such as nullfs,
	 * v_usecount may become zero.
	 *
	 * The VV_TEXT flag prevents modifications to the executable while
	 * the vnode is unlocked.
	 */
	VOP_UNLOCK(imgp->vp);

	/*
	 * Decide whether to enable randomization of user mappings.
	 * First, reset user preferences for the setid binaries.
	 * Then, account for the support of randomization by the ABI
	 * and by user preferences, and give PIE binaries special
	 * treatment.
	 */
	if (imgp->credential_setid) {
		PROC_LOCK(imgp->proc);
		imgp->proc->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE |
		    P2_WXORX_DISABLE | P2_WXORX_ENABLE_EXEC);
		PROC_UNLOCK(imgp->proc);
	}
	if ((sv->sv_flags & SV_ASLR) == 0 ||
	    (imgp->proc->p_flag2 & P2_ASLR_DISABLE) != 0 ||
	    (fctl0 & NT_FREEBSD_FCTL_ASLR_DISABLE) != 0) {
		KASSERT(imgp->et_dyn_addr != ET_DYN_ADDR_RAND,
		    ("imgp->et_dyn_addr == RAND and !ASLR"));
	} else if ((imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0 ||
	    (__elfN(aslr_enabled) && hdr->e_type == ET_EXEC) ||
	    imgp->et_dyn_addr == ET_DYN_ADDR_RAND) {
		imgp->map_flags |= MAP_ASLR;
		/*
		 * If the user does not care about sbrk, utilize the bss
		 * grow region for mappings as well.  We can select
		 * the base for the image anywhere and still not suffer
		 * from fragmentation.
		 */
		if (!__elfN(aslr_honor_sbrk) ||
		    (imgp->proc->p_flag2 & P2_ASLR_IGNSTART) != 0)
			imgp->map_flags |= MAP_ASLR_IGNSTART;
		if (__elfN(aslr_stack))
			imgp->map_flags |= MAP_ASLR_STACK;
		if (__elfN(aslr_shared_page))
			imgp->imgp_flags |= IMGP_ASLR_SHARED_PAGE;
	}

	if ((!__elfN(allow_wx) && (fctl0 & NT_FREEBSD_FCTL_WXNEEDED) == 0 &&
	    (imgp->proc->p_flag2 & P2_WXORX_DISABLE) == 0) ||
	    (imgp->proc->p_flag2 & P2_WXORX_ENABLE_EXEC) != 0)
		imgp->map_flags |= MAP_WXORX;

	error = exec_new_vmspace(imgp, sv);

	imgp->proc->p_sysent = sv;
	imgp->proc->p_elf_brandinfo = brand_info;

	vmspace = imgp->proc->p_vmspace;
	map = &vmspace->vm_map;
	maxv = sv->sv_usrstack;
	if ((imgp->map_flags & MAP_ASLR_STACK) == 0)
		maxv -= lim_max(td, RLIMIT_STACK);
	if (error == 0 && mapsz >= maxv - vm_map_min(map)) {
		uprintf("Excessive mapping size\n");
		error = ENOEXEC;
	}

	if (error == 0 && imgp->et_dyn_addr == ET_DYN_ADDR_RAND) {
		KASSERT((map->flags & MAP_ASLR) != 0,
		    ("ET_DYN_ADDR_RAND but !MAP_ASLR"));
		error = __CONCAT(rnd_, __elfN(base))(map,
		    vm_map_min(map) + mapsz + lim_max(td, RLIMIT_DATA),
		    /* reserve half of the address space for the interpreter */
		    maxv / 2, maxalign, &imgp->et_dyn_addr);
	}

	vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
	if (error != 0)
		goto ret;

	error = __elfN(load_sections)(imgp, hdr, phdr, imgp->et_dyn_addr, NULL);
	if (error != 0)
		goto ret;

	error = __elfN(enforce_limits)(imgp, hdr, phdr);
	if (error != 0)
		goto ret;

	/*
	 * We load the dynamic linker where a userland call
	 * to mmap(0, ...) would put it.  The rationale behind this
	 * calculation is that it leaves room for the heap to grow to
	 * its maximum allowed size.
	 */
	addr = round_page((vm_offset_t)vmspace->vm_daddr + lim_max(td,
	    RLIMIT_DATA));
	if ((map->flags & MAP_ASLR) != 0) {
		maxv1 = maxv / 2 + addr / 2;
		error = __CONCAT(rnd_, __elfN(base))(map, addr, maxv1,
		    (MAXPAGESIZES > 1 && pagesizes[1] != 0) ?
		    pagesizes[1] : pagesizes[0], &anon_loc);
		if (error != 0)
			goto ret;
		map->anon_loc = anon_loc;
	} else {
		map->anon_loc = addr;
	}

	entry = (u_long)hdr->e_entry + imgp->et_dyn_addr;
	imgp->entry_addr = entry;

	if (sv->sv_protect != NULL)
		sv->sv_protect(imgp, SVP_IMAGE);

	if (interp != NULL) {
		VOP_UNLOCK(imgp->vp);
		if ((map->flags & MAP_ASLR) != 0) {
			/* Assume that the interpreter fits into 1/4 of the AS */
			maxv1 = maxv / 2 + addr / 2;
			error = __CONCAT(rnd_, __elfN(base))(map, addr,
			    maxv1, PAGE_SIZE, &addr);
		}
		if (error == 0) {
			error = __elfN(load_interp)(imgp, brand_info, interp,
			    &addr, &imgp->entry_addr);
		}
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		if (error != 0)
			goto ret;
	} else
		addr = imgp->et_dyn_addr;

	error = exec_map_stack(imgp);
	if (error != 0)
		goto ret;

	/*
	 * Construct auxargs table (used by the copyout_auxargs routine)
	 */
	elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_NOWAIT);
	if (elf_auxargs == NULL) {
		VOP_UNLOCK(imgp->vp);
		elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
	}
	elf_auxargs->execfd = -1;
	elf_auxargs->phdr = proghdr + imgp->et_dyn_addr;
	elf_auxargs->phent = hdr->e_phentsize;
	elf_auxargs->phnum = hdr->e_phnum;
	elf_auxargs->pagesz = PAGE_SIZE;
	elf_auxargs->base = addr;
	elf_auxargs->flags = 0;
	elf_auxargs->entry = entry;
	elf_auxargs->hdr_eflags = hdr->e_flags;

	imgp->auxargs = elf_auxargs;
	imgp->interpreted = 0;
	imgp->reloc_base = addr;
	imgp->proc->p_osrel = osrel;
	imgp->proc->p_fctl0 = fctl0;
	imgp->proc->p_elf_flags = hdr->e_flags;

ret:
	ASSERT_VOP_LOCKED(imgp->vp, "skipped relock");
	if (free_interp)
		free(interp, M_TEMP);
	return (error);
}

#define elf_suword __CONCAT(suword, __ELF_WORD_SIZE)

int
__elfN(freebsd_copyout_auxargs)(struct image_params *imgp, uintptr_t base)
{
	Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
	Elf_Auxinfo *argarray, *pos;
	struct vmspace *vmspace;
	rlim_t stacksz;
	int error, oc;
	uint32_t bsdflags;

	argarray = pos = malloc(AT_COUNT * sizeof(*pos), M_TEMP,
	    M_WAITOK | M_ZERO);

	vmspace = imgp->proc->p_vmspace;

	if (args->execfd != -1)
		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
	AUXARGS_ENTRY(pos, AT_BASE, args->base);
	AUXARGS_ENTRY(pos, AT_EHDRFLAGS, args->hdr_eflags);
	if (imgp->execpathp != 0)
		AUXARGS_ENTRY_PTR(pos, AT_EXECPATH, imgp->execpathp);
	AUXARGS_ENTRY(pos, AT_OSRELDATE,
	    imgp->proc->p_ucred->cr_prison->pr_osreldate);
	if (imgp->canary != 0) {
		AUXARGS_ENTRY_PTR(pos, AT_CANARY, imgp->canary);
		AUXARGS_ENTRY(pos, AT_CANARYLEN, imgp->canarylen);
	}
	AUXARGS_ENTRY(pos, AT_NCPUS, mp_ncpus);
	if (imgp->pagesizes != 0) {
		AUXARGS_ENTRY_PTR(pos, AT_PAGESIZES, imgp->pagesizes);
		AUXARGS_ENTRY(pos, AT_PAGESIZESLEN, imgp->pagesizeslen);
	}
	if ((imgp->sysent->sv_flags & SV_TIMEKEEP) != 0) {
		AUXARGS_ENTRY(pos, AT_TIMEKEEP,
		    vmspace->vm_shp_base + imgp->sysent->sv_timekeep_offset);
	}
	AUXARGS_ENTRY(pos, AT_STACKPROT, imgp->sysent->sv_shared_page_obj
	    != NULL && imgp->stack_prot != 0 ? imgp->stack_prot :
	    imgp->sysent->sv_stackprot);
	if (imgp->sysent->sv_hwcap != NULL)
		AUXARGS_ENTRY(pos, AT_HWCAP, *imgp->sysent->sv_hwcap);
	if (imgp->sysent->sv_hwcap2 != NULL)
		AUXARGS_ENTRY(pos, AT_HWCAP2, *imgp->sysent->sv_hwcap2);
	bsdflags = 0;
	bsdflags |= __elfN(sigfastblock) ? ELF_BSDF_SIGFASTBLK : 0;
	oc = atomic_load_int(&vm_overcommit);
	bsdflags |= (oc & (SWAP_RESERVE_FORCE_ON | SWAP_RESERVE_RLIMIT_ON)) !=
	    0 ? ELF_BSDF_VMNOOVERCOMMIT : 0;
	AUXARGS_ENTRY(pos, AT_BSDFLAGS, bsdflags);
	AUXARGS_ENTRY(pos, AT_ARGC, imgp->args->argc);
	AUXARGS_ENTRY_PTR(pos, AT_ARGV, imgp->argv);
	AUXARGS_ENTRY(pos, AT_ENVC, imgp->args->envc);
	AUXARGS_ENTRY_PTR(pos, AT_ENVV, imgp->envv);
	AUXARGS_ENTRY_PTR(pos, AT_PS_STRINGS, imgp->ps_strings);
#ifdef RANDOM_FENESTRASX
	if ((imgp->sysent->sv_flags & SV_RNG_SEED_VER) != 0) {
		AUXARGS_ENTRY(pos, AT_FXRNG,
		    vmspace->vm_shp_base + imgp->sysent->sv_fxrng_gen_offset);
	}
#endif
	if ((imgp->sysent->sv_flags & SV_DSO_SIG) != 0 && __elfN(vdso) != 0) {
		AUXARGS_ENTRY(pos, AT_KPRELOAD,
		    vmspace->vm_shp_base + imgp->sysent->sv_vdso_offset);
	}
	AUXARGS_ENTRY(pos, AT_USRSTACKBASE, round_page(vmspace->vm_stacktop));
	stacksz = imgp->proc->p_limit->pl_rlimit[RLIMIT_STACK].rlim_cur;
	AUXARGS_ENTRY(pos, AT_USRSTACKLIM, stacksz);
	AUXARGS_ENTRY(pos, AT_NULL, 0);

	free(imgp->auxargs, M_TEMP);
	imgp->auxargs = NULL;
	KASSERT(pos - argarray <= AT_COUNT, ("Too many auxargs"));

	error = copyout(argarray, (void *)base, sizeof(*argarray) * AT_COUNT);
	free(argarray, M_TEMP);
	return (error);
}

int
__elfN(freebsd_fixup)(uintptr_t *stack_base, struct image_params *imgp)
{
	Elf_Addr *base;

	base = (Elf_Addr *)*stack_base;
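	/*
	 * Store argc one word below the current stack base; the runtime
	 * startup code expects to find it at the top of the new stack.
	 */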
	base--;
	if (elf_suword(base, imgp->args->argc) == -1)
		return (EFAULT);
	*stack_base = (uintptr_t)base;
	return (0);
}

/*
 * Code for generating ELF core dumps.
 */

typedef void (*segment_callback)(vm_map_entry_t, void *);

/* Closure for cb_put_phdr(). */
struct phdr_closure {
	Elf_Phdr *phdr;		/* Program header to fill in */
	Elf_Off offset;		/* Offset of segment in core file */
};

struct note_info {
	int type;		/* Note type. */
	struct regset *regset;	/* Register set. */
	outfunc_t outfunc;	/* Output function. */
	void *outarg;		/* Argument for the output function. */
	size_t outsize;		/* Output size. */
	TAILQ_ENTRY(note_info) link;	/* Link to the next note info. */
};

TAILQ_HEAD(note_info_list, note_info);

extern int compress_user_cores;
extern int compress_user_cores_level;

static void cb_put_phdr(vm_map_entry_t, void *);
static void cb_size_segment(vm_map_entry_t, void *);
static void each_dumpable_segment(struct thread *, segment_callback, void *,
    int);
static int __elfN(corehdr)(struct coredump_params *, int, void *, size_t,
    struct note_info_list *, size_t, int);
static void __elfN(putnote)(struct thread *td, struct note_info *, struct sbuf *);

static void __elfN(note_prpsinfo)(void *, struct sbuf *, size_t *);
static void __elfN(note_threadmd)(void *, struct sbuf *, size_t *);
static void __elfN(note_procstat_auxv)(void *, struct sbuf *, size_t *);
static void __elfN(note_procstat_proc)(void *, struct sbuf *, size_t *);
static void __elfN(note_procstat_psstrings)(void *, struct sbuf *, size_t *);
static void note_procstat_files(void *, struct sbuf *, size_t *);
static void note_procstat_groups(void *, struct sbuf *, size_t *);
static void note_procstat_osrel(void *, struct sbuf *, size_t *);
static void note_procstat_rlimit(void *, struct sbuf *, size_t *);
static void note_procstat_umask(void *, struct sbuf *, size_t *);
static void note_procstat_vmmap(void *, struct sbuf *, size_t *);

static int
core_compressed_write(void *base, size_t len, off_t offset, void *arg)
{

	return (core_write((struct coredump_params *)arg, base, len, offset,
	    UIO_SYSSPACE, NULL));
}

int
__elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit, int flags)
{
	struct ucred *cred = td->td_ucred;
	int compm, error = 0;
	struct sseg_closure seginfo;
	struct note_info_list notelst;
	struct coredump_params params;
	struct note_info *ninfo;
	void *hdr, *tmpbuf;
	size_t hdrsize, notesz, coresize;

	hdr = NULL;
	tmpbuf = NULL;
	TAILQ_INIT(&notelst);

	/* Size the program segments. */
	__elfN(size_segments)(td, &seginfo, flags);

	/*
	 * Collect info about the core file header area.
	 */
	hdrsize = sizeof(Elf_Ehdr) + sizeof(Elf_Phdr) * (1 + seginfo.count);
	if (seginfo.count + 1 >= PN_XNUM)
		hdrsize += sizeof(Elf_Shdr);
	td->td_proc->p_sysent->sv_elf_core_prepare_notes(td, &notelst, &notesz);
	coresize = round_page(hdrsize + notesz) + seginfo.size;
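
	/*
	 * Resulting core file layout (illustrative):
	 *
	 *	Elf_Ehdr | Elf_Phdr[1 + count] | notes | pad to page | segments
	 */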

	/* Set up core dump parameters. */
	params.offset = 0;
	params.active_cred = cred;
	params.file_cred = NOCRED;
	params.td = td;
	params.vp = vp;
	params.comp = NULL;

#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(td->td_proc);
		error = racct_add(td->td_proc, RACCT_CORE, coresize);
		PROC_UNLOCK(td->td_proc);
		if (error != 0) {
			error = EFAULT;
			goto done;
		}
	}
#endif
	if (coresize >= limit) {
		error = EFAULT;
		goto done;
	}

	/* Create a compression stream if necessary. */
	compm = compress_user_cores;
	if ((flags & (SVC_PT_COREDUMP | SVC_NOCOMPRESS)) == SVC_PT_COREDUMP &&
	    compm == 0)
		compm = COMPRESS_GZIP;
	if (compm != 0) {
		params.comp = compressor_init(core_compressed_write,
		    compm, CORE_BUF_SIZE,
		    compress_user_cores_level, &params);
		if (params.comp == NULL) {
			error = EFAULT;
			goto done;
		}
		tmpbuf = malloc(CORE_BUF_SIZE, M_TEMP, M_WAITOK | M_ZERO);
	}

	/*
	 * Allocate memory for building the header, fill it up,
	 * and write it out following the notes.
	 */
	hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
	error = __elfN(corehdr)(&params, seginfo.count, hdr, hdrsize, &notelst,
	    notesz, flags);

	/* Write the contents of all of the writable segments. */
	if (error == 0) {
		Elf_Phdr *php;
		off_t offset;
		int i;

		php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
		offset = round_page(hdrsize + notesz);
		for (i = 0; i < seginfo.count; i++) {
			error = core_output((char *)(uintptr_t)php->p_vaddr,
			    php->p_filesz, offset, &params, tmpbuf);
			if (error != 0)
				break;
			offset += php->p_filesz;
			php++;
		}
		if (error == 0 && params.comp != NULL)
			error = compressor_flush(params.comp);
	}
	if (error) {
		log(LOG_WARNING,
		    "Failed to write core file for process %s (error %d)\n",
		    curproc->p_comm, error);
	}

done:
	free(tmpbuf, M_TEMP);
	if (params.comp != NULL)
		compressor_fini(params.comp);
	while ((ninfo = TAILQ_FIRST(&notelst)) != NULL) {
		TAILQ_REMOVE(&notelst, ninfo, link);
		free(ninfo, M_TEMP);
	}
	if (hdr != NULL)
		free(hdr, M_TEMP);

	return (error);
}

/*
 * A callback for each_dumpable_segment() to write out the segment's
 * program header entry.
 */
static void
cb_put_phdr(vm_map_entry_t entry, void *closure)
{
	struct phdr_closure *phc = (struct phdr_closure *)closure;
	Elf_Phdr *phdr = phc->phdr;

	phc->offset = round_page(phc->offset);

	phdr->p_type = PT_LOAD;
	phdr->p_offset = phc->offset;
	phdr->p_vaddr = entry->start;
	phdr->p_paddr = 0;
	phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
	phdr->p_align = PAGE_SIZE;
	phdr->p_flags = __elfN(untrans_prot)(entry->protection);

	phc->offset += phdr->p_filesz;
	phc->phdr++;
}

/*
 * A callback for each_dumpable_segment() to gather information about
 * the number of segments and their total size.
 */
static void
cb_size_segment(vm_map_entry_t entry, void *closure)
{
	struct sseg_closure *ssc = (struct sseg_closure *)closure;

	ssc->count++;
	ssc->size += entry->end - entry->start;
}

void
__elfN(size_segments)(struct thread *td, struct sseg_closure *seginfo,
    int flags)
{
	seginfo->count = 0;
	seginfo->size = 0;

	each_dumpable_segment(td, cb_size_segment, seginfo, flags);
}

/*
 * For each writable segment in the process's memory map, call the given
 * function with a pointer to the map entry and some arbitrary
 * caller-supplied data.
 */
static void
each_dumpable_segment(struct thread *td, segment_callback func, void *closure,
    int flags)
{
	struct proc *p = td->td_proc;
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_map_entry_t entry;
	vm_object_t backing_object, object;
	bool ignore_entry;

	vm_map_lock_read(map);
	VM_MAP_ENTRY_FOREACH(entry, map) {
		/*
		 * Don't dump inaccessible mappings, deal with legacy
		 * coredump mode.
		 *
		 * Note that read-only segments related to the elf binary
		 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
		 * need to arbitrarily ignore such segments.
		 */
		if ((flags & SVC_ALL) == 0) {
			if (elf_legacy_coredump) {
				if ((entry->protection & VM_PROT_RW) !=
				    VM_PROT_RW)
					continue;
			} else {
				if ((entry->protection & VM_PROT_ALL) == 0)
					continue;
			}
		}

		/*
		 * Don't include a memory segment in the coredump if
		 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
		 * madvise(2).  Do not dump submaps (i.e. parts of the
		 * kernel map).
		 */
1790 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
1791 continue;
1792 if ((entry->eflags & MAP_ENTRY_NOCOREDUMP) != 0 &&
1793 (flags & SVC_ALL) == 0)
1794 continue;
1795 if ((object = entry->object.vm_object) == NULL)
1796 continue;
1797
1798 /* Ignore memory-mapped devices and such things. */
1799 VM_OBJECT_RLOCK(object);
1800 while ((backing_object = object->backing_object) != NULL) {
1801 VM_OBJECT_RLOCK(backing_object);
1802 VM_OBJECT_RUNLOCK(object);
1803 object = backing_object;
1804 }
1805 ignore_entry = (object->flags & OBJ_FICTITIOUS) != 0;
1806 VM_OBJECT_RUNLOCK(object);
1807 if (ignore_entry)
1808 continue;
1809
1810 (*func)(entry, closure);
1811 }
1812 vm_map_unlock_read(map);
1813 }
1814
1815 /*
1816 * Write the core file header to the file, including padding up to
1817 * the page boundary.
1818 */
1819 static int
__elfN(corehdr)1820 __elfN(corehdr)(struct coredump_params *p, int numsegs, void *hdr,
1821 size_t hdrsize, struct note_info_list *notelst, size_t notesz,
1822 int flags)
1823 {
1824 struct note_info *ninfo;
1825 struct sbuf *sb;
1826 int error;
1827
1828 /* Fill in the header. */
1829 bzero(hdr, hdrsize);
1830 __elfN(puthdr)(p->td, hdr, hdrsize, numsegs, notesz, flags);
1831
1832 sb = sbuf_new(NULL, NULL, CORE_BUF_SIZE, SBUF_FIXEDLEN);
1833 sbuf_set_drain(sb, sbuf_drain_core_output, p);
1834 sbuf_start_section(sb, NULL);
1835 sbuf_bcat(sb, hdr, hdrsize);
1836 TAILQ_FOREACH(ninfo, notelst, link)
1837 __elfN(putnote)(p->td, ninfo, sb);
1838 /* Align up to a page boundary for the program segments. */
1839 sbuf_end_section(sb, -1, PAGE_SIZE, 0);
1840 error = sbuf_finish(sb);
1841 sbuf_delete(sb);
1842
1843 return (error);
1844 }
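
/*
 * A minimal sketch (kept under #if 0) of the sbuf drain pattern used
 * above: data appended to a fixed-length sbuf is pushed through the
 * drain callback whenever the buffer fills, so large note data never
 * has to sit in memory all at once.  The counting drain below mirrors
 * the behavior of sbuf_count_drain() used by the sizing passes later
 * in this file; the "example_" name is hypothetical.
 */
#if 0
static int
example_count_drain(void *arg, const char *data __unused, int len)
{
	size_t *sizep = arg;

	*sizep += len;
	return (len);	/* Claim everything was consumed. */
}
#endif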

void
__elfN(prepare_notes)(struct thread *td, struct note_info_list *list,
    size_t *sizep)
{
	struct proc *p;
	struct thread *thr;
	size_t size;

	p = td->td_proc;
	size = 0;

	size += __elfN(register_note)(td, list, NT_PRPSINFO,
	    __elfN(note_prpsinfo), p);

	/*
	 * To have the debugger select the right thread (LWP) as the initial
	 * thread, we dump the state of the thread passed to us in td first.
	 * This is the thread that caused the core dump and is thus likely
	 * to be the thread one wants selected in the debugger.
	 */
	thr = td;
	while (thr != NULL) {
		size += __elfN(prepare_register_notes)(td, list, thr);
		size += __elfN(register_note)(td, list, -1,
		    __elfN(note_threadmd), thr);

		thr = thr == td ? TAILQ_FIRST(&p->p_threads) :
		    TAILQ_NEXT(thr, td_plist);
		if (thr == td)
			thr = TAILQ_NEXT(thr, td_plist);
	}

	size += __elfN(register_note)(td, list, NT_PROCSTAT_PROC,
	    __elfN(note_procstat_proc), p);
	size += __elfN(register_note)(td, list, NT_PROCSTAT_FILES,
	    note_procstat_files, p);
	size += __elfN(register_note)(td, list, NT_PROCSTAT_VMMAP,
	    note_procstat_vmmap, p);
	size += __elfN(register_note)(td, list, NT_PROCSTAT_GROUPS,
	    note_procstat_groups, p);
	size += __elfN(register_note)(td, list, NT_PROCSTAT_UMASK,
	    note_procstat_umask, p);
	size += __elfN(register_note)(td, list, NT_PROCSTAT_RLIMIT,
	    note_procstat_rlimit, p);
	size += __elfN(register_note)(td, list, NT_PROCSTAT_OSREL,
	    note_procstat_osrel, p);
	size += __elfN(register_note)(td, list, NT_PROCSTAT_PSSTRINGS,
	    __elfN(note_procstat_psstrings), p);
	size += __elfN(register_note)(td, list, NT_PROCSTAT_AUXV,
	    __elfN(note_procstat_auxv), p);

	*sizep = size;
}

void
__elfN(puthdr)(struct thread *td, void *hdr, size_t hdrsize, int numsegs,
    size_t notesz, int flags)
{
	Elf_Ehdr *ehdr;
	Elf_Phdr *phdr;
	Elf_Shdr *shdr;
	struct phdr_closure phc;
	Elf_Brandinfo *bi;

	ehdr = (Elf_Ehdr *)hdr;
	bi = td->td_proc->p_elf_brandinfo;

	ehdr->e_ident[EI_MAG0] = ELFMAG0;
	ehdr->e_ident[EI_MAG1] = ELFMAG1;
	ehdr->e_ident[EI_MAG2] = ELFMAG2;
	ehdr->e_ident[EI_MAG3] = ELFMAG3;
	ehdr->e_ident[EI_CLASS] = ELF_CLASS;
	ehdr->e_ident[EI_DATA] = ELF_DATA;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = td->td_proc->p_sysent->sv_elf_core_osabi;
	ehdr->e_ident[EI_ABIVERSION] = 0;
	ehdr->e_ident[EI_PAD] = 0;
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = bi->machine;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_entry = 0;
	ehdr->e_phoff = sizeof(Elf_Ehdr);
	ehdr->e_flags = td->td_proc->p_elf_flags;
	ehdr->e_ehsize = sizeof(Elf_Ehdr);
	ehdr->e_phentsize = sizeof(Elf_Phdr);
	ehdr->e_shentsize = sizeof(Elf_Shdr);
	ehdr->e_shstrndx = SHN_UNDEF;
	if (numsegs + 1 < PN_XNUM) {
		ehdr->e_phnum = numsegs + 1;
		ehdr->e_shnum = 0;
	} else {
		ehdr->e_phnum = PN_XNUM;
		ehdr->e_shnum = 1;

		ehdr->e_shoff = ehdr->e_phoff +
		    (numsegs + 1) * ehdr->e_phentsize;
		KASSERT(ehdr->e_shoff == hdrsize - sizeof(Elf_Shdr),
		    ("e_shoff: %zu, hdrsize - shdr: %zu",
		    (size_t)ehdr->e_shoff, hdrsize - sizeof(Elf_Shdr)));

		shdr = (Elf_Shdr *)((char *)hdr + ehdr->e_shoff);
		memset(shdr, 0, sizeof(*shdr));
		/*
		 * A special first section is used to hold large segment and
		 * section counts.  This was proposed by Sun Microsystems in
		 * Solaris and has been adopted by Linux; the standard ELF
		 * tools are already familiar with the technique.
		 *
		 * See table 7-7 of the Solaris "Linker and Libraries Guide"
		 * (or 12-7 depending on the version of the document) for more
		 * details.
		 */
		shdr->sh_type = SHT_NULL;
		shdr->sh_size = ehdr->e_shnum;
		shdr->sh_link = ehdr->e_shstrndx;
		shdr->sh_info = numsegs + 1;
	}

	/*
	 * Fill in the program header entries.
	 */
	phdr = (Elf_Phdr *)((char *)hdr + ehdr->e_phoff);

	/* The note segment. */
	phdr->p_type = PT_NOTE;
	phdr->p_offset = hdrsize;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = notesz;
	phdr->p_memsz = 0;
	phdr->p_flags = PF_R;
	phdr->p_align = ELF_NOTE_ROUNDSIZE;
	phdr++;

	/* All the writable segments from the program. */
	phc.phdr = phdr;
	phc.offset = round_page(hdrsize + notesz);
	each_dumpable_segment(td, cb_put_phdr, &phc, flags);
}
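
/*
 * A minimal sketch (kept under #if 0) of the reader side of the PN_XNUM
 * fallback set up above: when e_phnum would overflow, the real segment
 * count is carried in sh_info of section header 0.  The "example_" name
 * is hypothetical.
 */
#if 0
static uint32_t
example_real_phnum(const Elf_Ehdr *ehdr, const Elf_Shdr *shdr0)
{

	if (ehdr->e_phnum != PN_XNUM)
		return (ehdr->e_phnum);
	/* e_phnum overflowed; section header 0 carries the true count. */
	return (shdr0->sh_info);
}
#endif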

static size_t
__elfN(register_regset_note)(struct thread *td, struct note_info_list *list,
    struct regset *regset, struct thread *target_td)
{
	const struct sysentvec *sv;
	struct note_info *ninfo;
	size_t size, notesize;

	size = 0;
	if (!regset->get(regset, target_td, NULL, &size) || size == 0)
		return (0);

	ninfo = malloc(sizeof(*ninfo), M_TEMP, M_ZERO | M_WAITOK);
	ninfo->type = regset->note;
	ninfo->regset = regset;
	ninfo->outarg = target_td;
	ninfo->outsize = size;
	TAILQ_INSERT_TAIL(list, ninfo, link);

	sv = td->td_proc->p_sysent;
	notesize = sizeof(Elf_Note) +		/* note header */
	    roundup2(strlen(sv->sv_elf_core_abi_vendor) + 1,
	    ELF_NOTE_ROUNDSIZE) +		/* note name */
	    roundup2(size, ELF_NOTE_ROUNDSIZE);	/* note description */

	return (notesize);
}

size_t
__elfN(register_note)(struct thread *td, struct note_info_list *list,
    int type, outfunc_t out, void *arg)
{
	const struct sysentvec *sv;
	struct note_info *ninfo;
	size_t size, notesize;

	sv = td->td_proc->p_sysent;
	size = 0;
	out(arg, NULL, &size);
	ninfo = malloc(sizeof(*ninfo), M_TEMP, M_ZERO | M_WAITOK);
	ninfo->type = type;
	ninfo->outfunc = out;
	ninfo->outarg = arg;
	ninfo->outsize = size;
	TAILQ_INSERT_TAIL(list, ninfo, link);

	if (type == -1)
		return (size);

	notesize = sizeof(Elf_Note) +		/* note header */
	    roundup2(strlen(sv->sv_elf_core_abi_vendor) + 1,
	    ELF_NOTE_ROUNDSIZE) +		/* note name */
	    roundup2(size, ELF_NOTE_ROUNDSIZE);	/* note description */

	return (notesize);
}
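
/*
 * Worked example of the note size computed above (the same formula is
 * used by register_regset_note()): with the "FreeBSD" vendor string
 * (8 bytes including NUL) and a hypothetical 148-byte descriptor,
 * notesize = 12 (note header) + roundup2(8, 4) + roundup2(148, 4) =
 * 12 + 8 + 148 = 168 bytes in the PT_NOTE segment.
 */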

static size_t
append_note_data(const void *src, void *dst, size_t len)
{
	size_t padded_len;

	padded_len = roundup2(len, ELF_NOTE_ROUNDSIZE);
	if (dst != NULL) {
		bcopy(src, dst, len);
		bzero((char *)dst + len, padded_len - len);
	}
	return (padded_len);
}

size_t
__elfN(populate_note)(int type, void *src, void *dst, size_t size, void **descp)
{
	Elf_Note *note;
	char *buf;
	size_t notesize;

	buf = dst;
	if (buf != NULL) {
		note = (Elf_Note *)buf;
		note->n_namesz = sizeof(FREEBSD_ABI_VENDOR);
		note->n_descsz = size;
		note->n_type = type;
		buf += sizeof(*note);
		buf += append_note_data(FREEBSD_ABI_VENDOR, buf,
		    sizeof(FREEBSD_ABI_VENDOR));
		append_note_data(src, buf, size);
		if (descp != NULL)
			*descp = buf;
	}

	notesize = sizeof(Elf_Note) +		/* note header */
	    roundup2(sizeof(FREEBSD_ABI_VENDOR), ELF_NOTE_ROUNDSIZE) +
						/* note name */
	    roundup2(size, ELF_NOTE_ROUNDSIZE);	/* note description */

	return (notesize);
}
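
/*
 * On-disk layout produced by __elfN(populate_note)() above for a
 * hypothetical 4-byte descriptor, with every field padded to
 * ELF_NOTE_ROUNDSIZE (4-byte) alignment:
 *
 *	offset	contents
 *	0	n_namesz = 8
 *	4	n_descsz = 4
 *	8	n_type
 *	12	"FreeBSD\0"	(8 bytes, already a multiple of 4)
 *	20	descriptor	(4 bytes)
 */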

static void
__elfN(putnote)(struct thread *td, struct note_info *ninfo, struct sbuf *sb)
{
	Elf_Note note;
	const struct sysentvec *sv;
	ssize_t old_len, sect_len;
	size_t new_len, descsz, i;

	if (ninfo->type == -1) {
		ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize);
		return;
	}

	sv = td->td_proc->p_sysent;

	note.n_namesz = strlen(sv->sv_elf_core_abi_vendor) + 1;
	note.n_descsz = ninfo->outsize;
	note.n_type = ninfo->type;

	sbuf_bcat(sb, &note, sizeof(note));
	sbuf_start_section(sb, &old_len);
	sbuf_bcat(sb, sv->sv_elf_core_abi_vendor,
	    strlen(sv->sv_elf_core_abi_vendor) + 1);
	sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0);
	if (note.n_descsz == 0)
		return;
	sbuf_start_section(sb, &old_len);
	if (ninfo->regset != NULL) {
		struct regset *regset = ninfo->regset;
		void *buf;

		buf = malloc(ninfo->outsize, M_TEMP, M_ZERO | M_WAITOK);
		(void)regset->get(regset, ninfo->outarg, buf, &ninfo->outsize);
		sbuf_bcat(sb, buf, ninfo->outsize);
		free(buf, M_TEMP);
	} else
		ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize);
	sect_len = sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0);
	if (sect_len < 0)
		return;

	new_len = (size_t)sect_len;
	descsz = roundup(note.n_descsz, ELF_NOTE_ROUNDSIZE);
	if (new_len < descsz) {
		/*
		 * It is expected that individual note emitters will correctly
		 * predict their expected output size and fill up to that size
		 * themselves, padding in a format-specific way if needed.
		 * However, in case they don't, just do it here with zeros.
		 */
		for (i = 0; i < descsz - new_len; i++)
			sbuf_putc(sb, 0);
	} else if (new_len > descsz) {
		/*
		 * We can't always truncate sb -- we may have drained some
		 * of it already.
		 */
		KASSERT(new_len == descsz, ("%s: Note type %u changed as we "
		    "read it (%zu > %zu).  Since it is longer than "
		    "expected, this coredump's notes are corrupt.  THIS "
		    "IS A BUG in the note_procstat routine for type %u.\n",
		    __func__, (unsigned)note.n_type, new_len, descsz,
		    (unsigned)note.n_type));
	}
}
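
/*
 * Worked example of the alignment handling above: with the "FreeBSD"
 * vendor string (8 bytes including NUL, already a multiple of
 * ELF_NOTE_ROUNDSIZE) and a hypothetical 13-byte descriptor, the
 * descriptor section is zero-padded out to 16 bytes by
 * sbuf_end_section(), so the next note again starts 4-byte aligned.
 */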

/*
 * Miscellaneous note out functions.
 */

#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
#include <compat/freebsd32/freebsd32.h>
#include <compat/freebsd32/freebsd32_signal.h>

typedef struct prstatus32 elf_prstatus_t;
typedef struct prpsinfo32 elf_prpsinfo_t;
typedef struct fpreg32 elf_prfpregset_t;
typedef struct fpreg32 elf_fpregset_t;
typedef struct reg32 elf_gregset_t;
typedef struct thrmisc32 elf_thrmisc_t;
typedef struct ptrace_lwpinfo32 elf_lwpinfo_t;
#define ELF_KERN_PROC_MASK	KERN_PROC_MASK32
typedef struct kinfo_proc32 elf_kinfo_proc_t;
typedef uint32_t elf_ps_strings_t;
#else
typedef prstatus_t elf_prstatus_t;
typedef prpsinfo_t elf_prpsinfo_t;
typedef prfpregset_t elf_prfpregset_t;
typedef prfpregset_t elf_fpregset_t;
typedef gregset_t elf_gregset_t;
typedef thrmisc_t elf_thrmisc_t;
typedef struct ptrace_lwpinfo elf_lwpinfo_t;
#define ELF_KERN_PROC_MASK	0
typedef struct kinfo_proc elf_kinfo_proc_t;
typedef vm_offset_t elf_ps_strings_t;
#endif

static void
__elfN(note_prpsinfo)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct sbuf sbarg;
	size_t len;
	char *cp, *end;
	struct proc *p;
	elf_prpsinfo_t *psinfo;
	int error;

	p = arg;
	if (sb != NULL) {
		KASSERT(*sizep == sizeof(*psinfo), ("invalid size"));
		psinfo = malloc(sizeof(*psinfo), M_TEMP, M_ZERO | M_WAITOK);
		psinfo->pr_version = PRPSINFO_VERSION;
		psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t);
		strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
		PROC_LOCK(p);
		if (p->p_args != NULL) {
			len = sizeof(psinfo->pr_psargs) - 1;
			if (len > p->p_args->ar_length)
				len = p->p_args->ar_length;
			memcpy(psinfo->pr_psargs, p->p_args->ar_args, len);
			PROC_UNLOCK(p);
			error = 0;
		} else {
			_PHOLD(p);
			PROC_UNLOCK(p);
			sbuf_new(&sbarg, psinfo->pr_psargs,
			    sizeof(psinfo->pr_psargs), SBUF_FIXEDLEN);
			error = proc_getargv(curthread, p, &sbarg);
			PRELE(p);
			if (sbuf_finish(&sbarg) == 0) {
				len = sbuf_len(&sbarg);
				if (len > 0)
					len--;
			} else {
				len = sizeof(psinfo->pr_psargs) - 1;
			}
			sbuf_delete(&sbarg);
		}
		if (error != 0 || len == 0 || (ssize_t)len == -1)
			strlcpy(psinfo->pr_psargs, p->p_comm,
			    sizeof(psinfo->pr_psargs));
		else {
			KASSERT(len < sizeof(psinfo->pr_psargs),
			    ("len is too long: %zu vs %zu", len,
			    sizeof(psinfo->pr_psargs)));
			cp = psinfo->pr_psargs;
			end = cp + len - 1;
			for (;;) {
				cp = memchr(cp, '\0', end - cp);
				if (cp == NULL)
					break;
				*cp = ' ';
			}
		}
		psinfo->pr_pid = p->p_pid;
		sbuf_bcat(sb, psinfo, sizeof(*psinfo));
		free(psinfo, M_TEMP);
	}
	*sizep = sizeof(*psinfo);
}
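
/*
 * For example, an argument vector fetched above as "arg0\0arg1\0" has
 * its embedded NUL separators rewritten to spaces by the loop, yielding
 * the printable string "arg0 arg1" in pr_psargs.
 */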

static bool
__elfN(get_prstatus)(struct regset *rs, struct thread *td, void *buf,
    size_t *sizep)
{
	elf_prstatus_t *status;

	if (buf != NULL) {
		KASSERT(*sizep == sizeof(*status), ("%s: invalid size",
		    __func__));
		status = buf;
		memset(status, 0, *sizep);
		status->pr_version = PRSTATUS_VERSION;
		status->pr_statussz = sizeof(elf_prstatus_t);
		status->pr_gregsetsz = sizeof(elf_gregset_t);
		status->pr_fpregsetsz = sizeof(elf_fpregset_t);
		status->pr_osreldate = osreldate;
		status->pr_cursig = td->td_proc->p_sig;
		status->pr_pid = td->td_tid;
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
		fill_regs32(td, &status->pr_reg);
#else
		fill_regs(td, &status->pr_reg);
#endif
	}
	*sizep = sizeof(*status);
	return (true);
}

static bool
__elfN(set_prstatus)(struct regset *rs, struct thread *td, void *buf,
    size_t size)
{
	elf_prstatus_t *status;

	KASSERT(size == sizeof(*status), ("%s: invalid size", __func__));
	status = buf;
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
	set_regs32(td, &status->pr_reg);
#else
	set_regs(td, &status->pr_reg);
#endif
	return (true);
}

static struct regset __elfN(regset_prstatus) = {
	.note = NT_PRSTATUS,
	.size = sizeof(elf_prstatus_t),
	.get = __elfN(get_prstatus),
	.set = __elfN(set_prstatus),
};
ELF_REGSET(__elfN(regset_prstatus));

static bool
__elfN(get_fpregset)(struct regset *rs, struct thread *td, void *buf,
    size_t *sizep)
{
	elf_prfpregset_t *fpregset;

	if (buf != NULL) {
		KASSERT(*sizep == sizeof(*fpregset), ("%s: invalid size",
		    __func__));
		fpregset = buf;
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
		fill_fpregs32(td, fpregset);
#else
		fill_fpregs(td, fpregset);
#endif
	}
	*sizep = sizeof(*fpregset);
	return (true);
}

static bool
__elfN(set_fpregset)(struct regset *rs, struct thread *td, void *buf,
    size_t size)
{
	elf_prfpregset_t *fpregset;

	fpregset = buf;
	KASSERT(size == sizeof(*fpregset), ("%s: invalid size", __func__));
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
	set_fpregs32(td, fpregset);
#else
	set_fpregs(td, fpregset);
#endif
	return (true);
}

static struct regset __elfN(regset_fpregset) = {
	.note = NT_FPREGSET,
	.size = sizeof(elf_prfpregset_t),
	.get = __elfN(get_fpregset),
	.set = __elfN(set_fpregset),
};
ELF_REGSET(__elfN(regset_fpregset));

static bool
__elfN(get_thrmisc)(struct regset *rs, struct thread *td, void *buf,
    size_t *sizep)
{
	elf_thrmisc_t *thrmisc;

	if (buf != NULL) {
		KASSERT(*sizep == sizeof(*thrmisc),
		    ("%s: invalid size", __func__));
		thrmisc = buf;
		bzero(thrmisc, sizeof(*thrmisc));
		strcpy(thrmisc->pr_tname, td->td_name);
	}
	*sizep = sizeof(*thrmisc);
	return (true);
}

static struct regset __elfN(regset_thrmisc) = {
	.note = NT_THRMISC,
	.size = sizeof(elf_thrmisc_t),
	.get = __elfN(get_thrmisc),
};
ELF_REGSET(__elfN(regset_thrmisc));

static bool
__elfN(get_lwpinfo)(struct regset *rs, struct thread *td, void *buf,
    size_t *sizep)
{
	elf_lwpinfo_t pl;
	size_t size;
	int structsize;

	size = sizeof(structsize) + sizeof(pl);
	if (buf != NULL) {
		KASSERT(*sizep == size, ("%s: invalid size", __func__));
		structsize = sizeof(pl);
		memcpy(buf, &structsize, sizeof(structsize));
		bzero(&pl, sizeof(pl));
		pl.pl_lwpid = td->td_tid;
		pl.pl_event = PL_EVENT_NONE;
		pl.pl_sigmask = td->td_sigmask;
		pl.pl_siglist = td->td_siglist;
		if (td->td_si.si_signo != 0) {
			pl.pl_event = PL_EVENT_SIGNAL;
			pl.pl_flags |= PL_FLAG_SI;
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
			siginfo_to_siginfo32(&td->td_si, &pl.pl_siginfo);
#else
			pl.pl_siginfo = td->td_si;
#endif
		}
		strcpy(pl.pl_tdname, td->td_name);
		/* XXX TODO: supply more information in struct ptrace_lwpinfo */
		memcpy((int *)buf + 1, &pl, sizeof(pl));
	}
	*sizep = size;
	return (true);
}

static struct regset __elfN(regset_lwpinfo) = {
	.note = NT_PTLWPINFO,
	.size = sizeof(int) + sizeof(elf_lwpinfo_t),
	.get = __elfN(get_lwpinfo),
};
ELF_REGSET(__elfN(regset_lwpinfo));

static size_t
__elfN(prepare_register_notes)(struct thread *td, struct note_info_list *list,
    struct thread *target_td)
{
	struct sysentvec *sv = td->td_proc->p_sysent;
	struct regset **regsetp, **regset_end, *regset;
	size_t size;

	size = 0;

	/* NT_PRSTATUS must be the first register set note. */
	size += __elfN(register_regset_note)(td, list, &__elfN(regset_prstatus),
	    target_td);

	regsetp = sv->sv_regset_begin;
	if (regsetp == NULL) {
		/* XXX: This shouldn't be true for any FreeBSD ABIs. */
		size += __elfN(register_regset_note)(td, list,
		    &__elfN(regset_fpregset), target_td);
		return (size);
	}
	regset_end = sv->sv_regset_end;
	MPASS(regset_end != NULL);
	for (; regsetp < regset_end; regsetp++) {
		regset = *regsetp;
		if (regset->note == NT_PRSTATUS)
			continue;
		size += __elfN(register_regset_note)(td, list, regset,
		    target_td);
	}
	return (size);
}

/*
 * Allow for MD specific notes, as well as any MD
 * specific preparations for writing MI notes.
 */
static void
__elfN(note_threadmd)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct thread *td;
	void *buf;
	size_t size;

	td = (struct thread *)arg;
	size = *sizep;
	if (size != 0 && sb != NULL)
		buf = malloc(size, M_TEMP, M_ZERO | M_WAITOK);
	else
		buf = NULL;
	size = 0;
	__elfN(dump_thread)(td, buf, &size);
	KASSERT(sb == NULL || *sizep == size, ("invalid size"));
	if (size != 0 && sb != NULL)
		sbuf_bcat(sb, buf, size);
	free(buf, M_TEMP);
	*sizep = size;
}

#ifdef KINFO_PROC_SIZE
CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);
#endif

static void
__elfN(note_procstat_proc)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize;

	p = arg;
	size = sizeof(structsize) + p->p_numthreads *
	    sizeof(elf_kinfo_proc_t);

	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(elf_kinfo_proc_t);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		sx_slock(&proctree_lock);
		PROC_LOCK(p);
		kern_proc_out(p, sb, ELF_KERN_PROC_MASK);
		sx_sunlock(&proctree_lock);
	}
	*sizep = size;
}

#ifdef KINFO_FILE_SIZE
CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE);
#endif

static void
note_procstat_files(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size, sect_sz, i;
	ssize_t start_len, sect_len;
	int structsize, filedesc_flags;

	if (coredump_pack_fileinfo)
		filedesc_flags = KERN_FILEDESC_PACK_KINFO;
	else
		filedesc_flags = 0;

	p = arg;
	structsize = sizeof(struct kinfo_file);
	if (sb == NULL) {
		size = 0;
		sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN);
		sbuf_set_drain(sb, sbuf_count_drain, &size);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PROC_LOCK(p);
		kern_proc_filedesc_out(p, sb, -1, filedesc_flags);
		sbuf_finish(sb);
		sbuf_delete(sb);
		*sizep = size;
	} else {
		sbuf_start_section(sb, &start_len);

		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PROC_LOCK(p);
		kern_proc_filedesc_out(p, sb, *sizep - sizeof(structsize),
		    filedesc_flags);

		sect_len = sbuf_end_section(sb, start_len, 0, 0);
		if (sect_len < 0)
			return;
		sect_sz = sect_len;

		KASSERT(sect_sz <= *sizep,
		    ("kern_proc_filedesc_out did not respect maxlen; "
		    "requested %zu, got %zu", *sizep - sizeof(structsize),
		    sect_sz - sizeof(structsize)));

		for (i = 0; i < *sizep - sect_sz && sb->s_error == 0; i++)
			sbuf_putc(sb, 0);
	}
}

#ifdef KINFO_VMENTRY_SIZE
CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE);
#endif

static void
note_procstat_vmmap(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize, vmmap_flags;

	if (coredump_pack_vmmapinfo)
		vmmap_flags = KERN_VMMAP_PACK_KINFO;
	else
		vmmap_flags = 0;

	p = arg;
	structsize = sizeof(struct kinfo_vmentry);
	if (sb == NULL) {
		size = 0;
		sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN);
		sbuf_set_drain(sb, sbuf_count_drain, &size);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PROC_LOCK(p);
		kern_proc_vmmap_out(p, sb, -1, vmmap_flags);
		sbuf_finish(sb);
		sbuf_delete(sb);
		*sizep = size;
	} else {
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PROC_LOCK(p);
		kern_proc_vmmap_out(p, sb, *sizep - sizeof(structsize),
		    vmmap_flags);
	}
}

static void
note_procstat_groups(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize;

	p = arg;
	size = sizeof(structsize) + p->p_ucred->cr_ngroups * sizeof(gid_t);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(gid_t);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		sbuf_bcat(sb, p->p_ucred->cr_groups, p->p_ucred->cr_ngroups *
		    sizeof(gid_t));
	}
	*sizep = size;
}

static void
note_procstat_umask(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize;

	p = arg;
	size = sizeof(structsize) + sizeof(p->p_pd->pd_cmask);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(p->p_pd->pd_cmask);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		sbuf_bcat(sb, &p->p_pd->pd_cmask, sizeof(p->p_pd->pd_cmask));
	}
	*sizep = size;
}

static void
note_procstat_rlimit(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	struct rlimit rlim[RLIM_NLIMITS];
	size_t size;
	int structsize, i;

	p = arg;
	size = sizeof(structsize) + sizeof(rlim);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(rlim);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PROC_LOCK(p);
		for (i = 0; i < RLIM_NLIMITS; i++)
			lim_rlimit_proc(p, i, &rlim[i]);
		PROC_UNLOCK(p);
		sbuf_bcat(sb, rlim, sizeof(rlim));
	}
	*sizep = size;
}

static void
note_procstat_osrel(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize;

	p = arg;
	size = sizeof(structsize) + sizeof(p->p_osrel);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(p->p_osrel);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		sbuf_bcat(sb, &p->p_osrel, sizeof(p->p_osrel));
	}
	*sizep = size;
}

static void
__elfN(note_procstat_psstrings)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	elf_ps_strings_t ps_strings;
	size_t size;
	int structsize;

	p = arg;
	size = sizeof(structsize) + sizeof(ps_strings);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(ps_strings);
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
		ps_strings = PTROUT(PROC_PS_STRINGS(p));
#else
		ps_strings = PROC_PS_STRINGS(p);
#endif
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		sbuf_bcat(sb, &ps_strings, sizeof(ps_strings));
	}
	*sizep = size;
}

static void
__elfN(note_procstat_auxv)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize;

	p = arg;
	if (sb == NULL) {
		size = 0;
		sb = sbuf_new(NULL, NULL, AT_COUNT * sizeof(Elf_Auxinfo),
		    SBUF_FIXEDLEN);
		sbuf_set_drain(sb, sbuf_count_drain, &size);
		/*
		 * Only the byte count matters in this sizing pass, so the
		 * (uninitialized) value of structsize is irrelevant here.
		 */
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PHOLD(p);
		proc_getauxv(curthread, p, sb);
		PRELE(p);
		sbuf_finish(sb);
		sbuf_delete(sb);
		*sizep = size;
	} else {
		structsize = sizeof(Elf_Auxinfo);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PHOLD(p);
		proc_getauxv(curthread, p, sb);
		PRELE(p);
	}
}
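
/*
 * A minimal sketch (kept under #if 0) of the two-pass protocol every
 * outfunc_t above follows: called with sb == NULL it only reports its
 * size, and called with an sbuf it must emit exactly the bytes it
 * promised.  The "example_" name and the int payload are hypothetical.
 */
#if 0
static void
example_note_out(void *arg, struct sbuf *sb, size_t *sizep)
{
	int datum;

	if (sb != NULL) {
		KASSERT(*sizep == sizeof(datum), ("invalid size"));
		datum = *(int *)arg;
		sbuf_bcat(sb, &datum, sizeof(datum));
	}
	*sizep = sizeof(datum);
}
#endif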

#define MAX_NOTES_LOOP	4096
bool
__elfN(parse_notes)(const struct image_params *imgp, const Elf_Note *checknote,
    const char *note_vendor, const Elf_Phdr *pnote,
    bool (*cb)(const Elf_Note *, void *, bool *), void *cb_arg)
{
	const Elf_Note *note, *note0, *note_end;
	const char *note_name;
	char *buf;
	int i, error;
	bool res;

	/* We need some limit, might as well use PAGE_SIZE. */
	if (pnote == NULL || pnote->p_filesz > PAGE_SIZE)
		return (false);
	ASSERT_VOP_LOCKED(imgp->vp, "parse_notes");
	if (pnote->p_offset > PAGE_SIZE ||
	    pnote->p_filesz > PAGE_SIZE - pnote->p_offset) {
		buf = malloc(pnote->p_filesz, M_TEMP, M_NOWAIT);
		if (buf == NULL) {
			VOP_UNLOCK(imgp->vp);
			buf = malloc(pnote->p_filesz, M_TEMP, M_WAITOK);
			vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		}
		error = vn_rdwr(UIO_READ, imgp->vp, buf, pnote->p_filesz,
		    pnote->p_offset, UIO_SYSSPACE, IO_NODELOCKED,
		    curthread->td_ucred, NOCRED, NULL, curthread);
		if (error != 0) {
			uprintf("i/o error PT_NOTE\n");
			goto retf;
		}
		note = note0 = (const Elf_Note *)buf;
		note_end = (const Elf_Note *)(buf + pnote->p_filesz);
	} else {
		note = note0 = (const Elf_Note *)(imgp->image_header +
		    pnote->p_offset);
		note_end = (const Elf_Note *)(imgp->image_header +
		    pnote->p_offset + pnote->p_filesz);
		buf = NULL;
	}
	for (i = 0; i < MAX_NOTES_LOOP && note >= note0 && note < note_end;
	    i++) {
		if (!aligned(note, Elf32_Addr)) {
			uprintf("Unaligned ELF note\n");
			goto retf;
		}
		if ((const char *)note_end - (const char *)note <
		    sizeof(Elf_Note)) {
			uprintf("ELF note too short\n");
			goto retf;
		}
		if (note->n_namesz != checknote->n_namesz ||
		    note->n_descsz != checknote->n_descsz ||
		    note->n_type != checknote->n_type)
			goto nextnote;
		note_name = (const char *)(note + 1);
		if (note_name + checknote->n_namesz >=
		    (const char *)note_end || strncmp(note_vendor,
		    note_name, checknote->n_namesz) != 0)
			goto nextnote;

		if (cb(note, cb_arg, &res))
			goto ret;
nextnote:
		note = (const Elf_Note *)((const char *)(note + 1) +
		    roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE) +
		    roundup2(note->n_descsz, ELF_NOTE_ROUNDSIZE));
	}
	if (i >= MAX_NOTES_LOOP)
		uprintf("ELF note parser reached %d notes\n", i);
retf:
	res = false;
ret:
	free(buf, M_TEMP);
	return (res);
}
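
/*
 * A minimal sketch (kept under #if 0) of a parse_notes() callback.
 * Returning true stops the walk; *res becomes the overall result handed
 * back to the caller.  The "example_" name is hypothetical; see
 * brandnote_cb() and note_fctl_cb() below for the real users.
 */
#if 0
static bool
example_note_cb(const Elf_Note *note __unused, void *arg __unused, bool *res)
{

	*res = true;	/* Note matched; report success... */
	return (true);	/* ...and stop iterating. */
}
#endif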

struct brandnote_cb_arg {
	Elf_Brandnote *brandnote;
	int32_t *osrel;
};

static bool
brandnote_cb(const Elf_Note *note, void *arg0, bool *res)
{
	struct brandnote_cb_arg *arg;

	arg = arg0;

	/*
	 * Fetch the osreldate for the binary from the ELF OSABI-note if
	 * necessary.
	 */
	*res = (arg->brandnote->flags & BN_TRANSLATE_OSREL) != 0 &&
	    arg->brandnote->trans_osrel != NULL ?
	    arg->brandnote->trans_osrel(note, arg->osrel) : true;

	return (true);
}

static Elf_Note fctl_note = {
	.n_namesz = sizeof(FREEBSD_ABI_VENDOR),
	.n_descsz = sizeof(uint32_t),
	.n_type = NT_FREEBSD_FEATURE_CTL,
};

struct fctl_cb_arg {
	bool *has_fctl0;
	uint32_t *fctl0;
};

static bool
note_fctl_cb(const Elf_Note *note, void *arg0, bool *res)
{
	struct fctl_cb_arg *arg;
	const Elf32_Word *desc;
	uintptr_t p;

	arg = arg0;
	p = (uintptr_t)(note + 1);
	p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);
	desc = (const Elf32_Word *)p;
	*arg->has_fctl0 = true;
	*arg->fctl0 = desc[0];
	*res = true;
	return (true);
}

/*
 * Try to find the appropriate ABI-note section for checknote, and fetch
 * the osreldate and feature control flags for the binary from the ELF
 * OSABI-note.  Only the first page of the image is searched, the same
 * as for headers.
 */
static bool
__elfN(check_note)(struct image_params *imgp, Elf_Brandnote *brandnote,
    int32_t *osrel, bool *has_fctl0, uint32_t *fctl0)
{
	const Elf_Phdr *phdr;
	const Elf_Ehdr *hdr;
	struct brandnote_cb_arg b_arg;
	struct fctl_cb_arg f_arg;
	int i, j;

	hdr = (const Elf_Ehdr *)imgp->image_header;
	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
	b_arg.brandnote = brandnote;
	b_arg.osrel = osrel;
	f_arg.has_fctl0 = has_fctl0;
	f_arg.fctl0 = fctl0;

	for (i = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_NOTE && __elfN(parse_notes)(imgp,
		    &brandnote->hdr, brandnote->vendor, &phdr[i], brandnote_cb,
		    &b_arg)) {
			for (j = 0; j < hdr->e_phnum; j++) {
				if (phdr[j].p_type == PT_NOTE &&
				    __elfN(parse_notes)(imgp, &fctl_note,
				    FREEBSD_ABI_VENDOR, &phdr[j],
				    note_fctl_cb, &f_arg))
					break;
			}
			return (true);
		}
	}
	return (false);
}

/*
 * Tell kern_execve.c about it, with a little help from the linker.
 */
static struct execsw __elfN(execsw) = {
	.ex_imgact = __CONCAT(exec_, __elfN(imgact)),
	.ex_name = __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
};
EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));

static vm_prot_t
__elfN(trans_prot)(Elf_Word flags)
{
	vm_prot_t prot;

	prot = 0;
	if (flags & PF_X)
		prot |= VM_PROT_EXECUTE;
	if (flags & PF_W)
		prot |= VM_PROT_WRITE;
	if (flags & PF_R)
		prot |= VM_PROT_READ;
#if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__))
	if (i386_read_exec && (flags & PF_R))
		prot |= VM_PROT_EXECUTE;
#endif
	return (prot);
}

static Elf_Word
__elfN(untrans_prot)(vm_prot_t prot)
{
	Elf_Word flags;

	flags = 0;
	if (prot & VM_PROT_EXECUTE)
		flags |= PF_X;
	if (prot & VM_PROT_READ)
		flags |= PF_R;
	if (prot & VM_PROT_WRITE)
		flags |= PF_W;
	return (flags);
}
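
/*
 * Worked example of the translations above: PF_R|PF_X maps to
 * VM_PROT_READ|VM_PROT_EXECUTE and back unchanged.  The pair is not
 * always a strict inverse, though: on 32-bit x86 with i386_read_exec
 * enabled, trans_prot() adds VM_PROT_EXECUTE to any readable segment,
 * so untrans_prot(trans_prot(PF_R)) yields PF_R|PF_X.
 */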