/*-
 * Copyright (c) 1995-1996 Søren Schmidt
 * Copyright (c) 1996 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/imgact_elf.c,v 1.73.2.13 2002/12/28 19:49:41 dillon Exp $
 * $DragonFly: src/sys/kern/imgact_elf.c,v 1.9 2003/09/23 05:03:51 dillon Exp $
 */

#include <sys/param.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/pioctl.h>
#include <sys/procfs.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <sys/lock.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>

#include <machine/elf.h>
#include <machine/md_var.h>

#define OLD_EI_BRAND	8

__ElfType(Brandinfo);
__ElfType(Auxargs);

static int elf_check_header (const Elf_Ehdr *hdr);
static int elf_freebsd_fixup (register_t **stack_base,
    struct image_params *imgp);
static int elf_load_file (struct proc *p, const char *file, u_long *addr,
    u_long *entry);
static int elf_load_section (struct proc *p,
    struct vmspace *vmspace, struct vnode *vp,
    vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
    vm_prot_t prot);
static int exec_elf_imgact (struct image_params *imgp);

static int elf_trace = 0;
SYSCTL_INT(_debug, OID_AUTO, elf_trace, CTLFLAG_RW, &elf_trace, 0, "");
static int elf_legacy_coredump = 0;
SYSCTL_INT(_debug, OID_AUTO, elf_legacy_coredump, CTLFLAG_RW,
    &elf_legacy_coredump, 0, "");

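/*
 * Native FreeBSD ELF sysentvec: the system call table, signal delivery
 * hooks, auxiliary-vector fixup, and coredump handler used for branded
 * FreeBSD ELF images.
 */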
static struct sysentvec elf_freebsd_sysvec = {
	SYS_MAXSYSCALL,
	sysent,
	0,
	0,
	0,
	0,
	0,
	0,
	elf_freebsd_fixup,
	sendsig,
	sigcode,
	&szsigcode,
	0,
	"FreeBSD ELF",
	elf_coredump,
	NULL,
	MINSIGSTKSZ
};

static Elf_Brandinfo freebsd_brand_info = {
	ELFOSABI_FREEBSD,
	"FreeBSD",
	"",
	"/usr/libexec/ld-elf.so.1",
	&elf_freebsd_sysvec
};

static Elf_Brandinfo *elf_brand_list[MAX_BRANDS] = {
	&freebsd_brand_info,
	NULL, NULL, NULL,
	NULL, NULL, NULL, NULL
};

int
elf_insert_brand_entry(Elf_Brandinfo *entry)
{
	int i;

	for (i=1; i<MAX_BRANDS; i++) {
		if (elf_brand_list[i] == NULL) {
			elf_brand_list[i] = entry;
			break;
		}
	}
	if (i == MAX_BRANDS)
		return -1;
	return 0;
}

int
elf_remove_brand_entry(Elf_Brandinfo *entry)
{
	int i;

	for (i=1; i<MAX_BRANDS; i++) {
		if (elf_brand_list[i] == entry) {
			elf_brand_list[i] = NULL;
			break;
		}
	}
	if (i == MAX_BRANDS)
		return -1;
	return 0;
}

int
elf_brand_inuse(Elf_Brandinfo *entry)
{
	struct proc *p;
	int rval = FALSE;

	FOREACH_PROC_IN_SYSTEM(p) {
		if (p->p_sysent == entry->sysvec) {
			rval = TRUE;
			break;
		}
	}

	return (rval);
}

static int
elf_check_header(const Elf_Ehdr *hdr)
{
	if (!IS_ELF(*hdr) ||
	    hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
	    hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
	    hdr->e_ident[EI_VERSION] != EV_CURRENT)
		return ENOEXEC;

	if (!ELF_MACHINE_OK(hdr->e_machine))
		return ENOEXEC;

	if (hdr->e_version != ELF_TARG_VER)
		return ENOEXEC;

	return 0;
}

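/*
 * Map a single PT_LOAD segment from the backing vnode into the target
 * vmspace.  The file-backed portion is inserted copy-on-write, any
 * remaining bss gets a zero-fill anonymous mapping, and the partial page
 * between filsz and memsz is copied in by hand before the final
 * protection is applied.
 */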
static int
elf_load_section(struct proc *p, struct vmspace *vmspace, struct vnode *vp,
		 vm_offset_t offset, caddr_t vmaddr, size_t memsz,
		 size_t filsz, vm_prot_t prot)
{
	size_t map_len;
	vm_offset_t map_addr;
	int error, rv, cow;
	int count;
	size_t copy_len;
	vm_object_t object;
	vm_offset_t file_addr;
	vm_offset_t data_buf = 0;

	VOP_GETVOBJECT(vp, &object);
	error = 0;

	/*
	 * It's necessary to fail if the filsz + offset taken from the
	 * header is greater than the actual file pager object's size.
	 * If we were to allow this, then the vm_map_find() below would
	 * walk right off the end of the file object and into the ether.
	 *
	 * While I'm here, might as well check for something else that
	 * is invalid: filsz cannot be greater than memsz.
	 */
	if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
	    filsz > memsz) {
		uprintf("elf_load_section: truncated ELF file\n");
		return (ENOEXEC);
	}

	map_addr = trunc_page((vm_offset_t)vmaddr);
	file_addr = trunc_page(offset);

	/*
	 * We have two choices.  We can either clear the data in the last page
	 * of an oversized mapping, or we can start the anon mapping a page
	 * early and copy the initialized data into that first page.  We
	 * choose the second.
	 */
	if (memsz > filsz)
		map_len = trunc_page(offset + filsz) - file_addr;
	else
		map_len = round_page(offset + filsz) - file_addr;

	if (map_len != 0) {
		vm_object_reference(object);

		/* cow flags: don't dump readonly sections in core */
		cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
		    (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);

		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
		vm_map_lock(&vmspace->vm_map);
		rv = vm_map_insert(&vmspace->vm_map, &count,
				   object,
				   file_addr,		/* file offset */
				   map_addr,		/* virtual start */
				   map_addr + map_len,	/* virtual end */
				   prot,
				   VM_PROT_ALL,
				   cow);
		vm_map_unlock(&vmspace->vm_map);
		vm_map_entry_release(count);
		if (rv != KERN_SUCCESS) {
			vm_object_deallocate(object);
			return EINVAL;
		}

		/* we can stop now if we've covered it all */
		if (memsz == filsz) {
			return 0;
		}
	}

	/*
	 * We have to get the remaining bit of the file into the first part
	 * of the oversized map segment.  This is normally because the .data
	 * segment in the file is extended to provide bss.  It's a neat idea
	 * to try and save a page, but it's a pain in the behind to implement.
	 */
	copy_len = (offset + filsz) - trunc_page(offset + filsz);
	map_addr = trunc_page((vm_offset_t)vmaddr + filsz);
	map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr;

	/* This had damn well better be true! */
	if (map_len != 0) {
		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
		vm_map_lock(&vmspace->vm_map);
		rv = vm_map_insert(&vmspace->vm_map, &count,
				   NULL, 0,
				   map_addr, map_addr + map_len,
				   VM_PROT_ALL, VM_PROT_ALL, 0);
		vm_map_unlock(&vmspace->vm_map);
		vm_map_entry_release(count);
		if (rv != KERN_SUCCESS) {
			return EINVAL;
		}
	}

	if (copy_len != 0) {
		vm_object_reference(object);
		rv = vm_map_find(exec_map,
				 object,
				 trunc_page(offset + filsz),
				 &data_buf,
				 PAGE_SIZE,
				 TRUE,
				 VM_PROT_READ,
				 VM_PROT_ALL,
				 MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
		if (rv != KERN_SUCCESS) {
			vm_object_deallocate(object);
			return EINVAL;
		}

		/* send the page fragment to user space */
		error = copyout((caddr_t)data_buf, (caddr_t)map_addr, copy_len);
		vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
		if (error) {
			return (error);
		}
	}

	/*
	 * set it to the specified protection
	 */
	vm_map_protect(&vmspace->vm_map, map_addr, map_addr + map_len, prot,
		       FALSE);

	return error;
}
/*
 * Load the file "file" into memory.  It may be either a shared object
 * or an executable.
 *
 * The "addr" reference parameter is in/out.  On entry, it specifies
 * the address where a shared object should be loaded.  If the file is
 * an executable, this value is ignored.  On exit, "addr" specifies
 * where the file was actually loaded.
 *
 * The "entry" reference parameter is out only.  On exit, it specifies
 * the entry point for the loaded file.
 */
static int
elf_load_file(struct proc *p, const char *file, u_long *addr, u_long *entry)
{
	struct {
		struct nameidata nd;
		struct vattr attr;
		struct image_params image_params;
	} *tempdata;
	const Elf_Ehdr *hdr = NULL;
	const Elf_Phdr *phdr = NULL;
	struct nameidata *nd;
	struct vmspace *vmspace = p->p_vmspace;
	struct vattr *attr;
	struct image_params *imgp;
	vm_prot_t prot;
	u_long rbase;
	u_long base_addr = 0;
	int error, i, numsegs;
	struct thread *td = p->p_thread;

	tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
	nd = &tempdata->nd;
	attr = &tempdata->attr;
	imgp = &tempdata->image_params;

	/*
	 * Initialize part of the common data
	 */
	imgp->proc = p;
	imgp->uap = NULL;
	imgp->attr = attr;
	imgp->firstpage = NULL;
	imgp->image_header = (char *)kmem_alloc_wait(exec_map, PAGE_SIZE);

	if (imgp->image_header == NULL) {
		nd->ni_vp = NULL;
		error = ENOMEM;
		goto fail;
	}

	NDINIT(nd, NAMEI_LOOKUP, CNP_LOCKLEAF | CNP_FOLLOW,
	    UIO_SYSSPACE, file, td);

	if ((error = namei(nd)) != 0) {
		nd->ni_vp = NULL;
		goto fail;
	}
	NDFREE(nd, NDF_ONLY_PNBUF);
	imgp->vp = nd->ni_vp;

	/*
	 * Check permissions, modes, uid, etc on the file, and "open" it.
	 */
	error = exec_check_permissions(imgp);
	if (error) {
		VOP_UNLOCK(nd->ni_vp, 0, td);
		goto fail;
	}

	error = exec_map_first_page(imgp);
	/*
	 * Also make certain that the interpreter stays the same, so set
	 * its VTEXT flag, too.
	 */
	if (error == 0)
		nd->ni_vp->v_flag |= VTEXT;
	VOP_UNLOCK(nd->ni_vp, 0, td);
	if (error)
		goto fail;

	hdr = (const Elf_Ehdr *)imgp->image_header;
	if ((error = elf_check_header(hdr)) != 0)
		goto fail;
	if (hdr->e_type == ET_DYN)
		rbase = *addr;
	else if (hdr->e_type == ET_EXEC)
		rbase = 0;
	else {
		error = ENOEXEC;
		goto fail;
	}

	/* Only support headers that fit within first page for now */
	if ((hdr->e_phoff > PAGE_SIZE) ||
	    (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
		error = ENOEXEC;
		goto fail;
	}

	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);

	for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_LOAD) {	/* Loadable segment */
			prot = 0;
			if (phdr[i].p_flags & PF_X)
				prot |= VM_PROT_EXECUTE;
			if (phdr[i].p_flags & PF_W)
				prot |= VM_PROT_WRITE;
			if (phdr[i].p_flags & PF_R)
				prot |= VM_PROT_READ;

			error = elf_load_section(
				    p, vmspace, nd->ni_vp,
				    phdr[i].p_offset,
				    (caddr_t)phdr[i].p_vaddr + rbase,
				    phdr[i].p_memsz,
				    phdr[i].p_filesz, prot);
			if (error != 0)
				goto fail;
			/*
			 * Establish the base address if this is the
			 * first segment.
			 */
			if (numsegs == 0)
				base_addr = trunc_page(phdr[i].p_vaddr + rbase);
			numsegs++;
		}
	}
	*addr = base_addr;
	*entry = (unsigned long)hdr->e_entry + rbase;

fail:
	if (imgp->firstpage)
		exec_unmap_first_page(imgp);
	if (imgp->image_header)
		kmem_free_wakeup(exec_map, (vm_offset_t)imgp->image_header,
		    PAGE_SIZE);
	if (nd->ni_vp)
		vrele(nd->ni_vp);

	free(tempdata, M_TEMP);

	return error;
}

/*
 * non static, as it can be overridden by start_init()
 */
int fallback_elf_brand = -1;
SYSCTL_INT(_kern, OID_AUTO, fallback_elf_brand, CTLFLAG_RW,
    &fallback_elf_brand, -1,
    "ELF brand of last resort");

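/*
 * The ELF image activator: validate the headers, map each PT_LOAD
 * segment, record the text/data sizes, select an ABI brand, and load
 * the requested interpreter (if any) before building the auxargs table.
 */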
static int
exec_elf_imgact(struct image_params *imgp)
{
	const Elf_Ehdr *hdr = (const Elf_Ehdr *) imgp->image_header;
	const Elf_Phdr *phdr;
	Elf_Auxargs *elf_auxargs = NULL;
	struct vmspace *vmspace;
	vm_prot_t prot;
	u_long text_size = 0, data_size = 0, total_size = 0;
	u_long text_addr = 0, data_addr = 0;
	u_long seg_size, seg_addr;
	u_long addr, entry = 0, proghdr = 0;
	int error, i;
	const char *interp = NULL;
	Elf_Brandinfo *brand_info;
	char *path;

	/*
	 * Do we have a valid ELF header ?
	 */
	if (elf_check_header(hdr) != 0 || hdr->e_type != ET_EXEC)
		return -1;

	/*
	 * From here on down, we return an errno, not -1, as we've
	 * detected an ELF file.
	 */

	if ((hdr->e_phoff > PAGE_SIZE) ||
	    (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
		/* Only support headers in first page for now */
		return ENOEXEC;
	}
	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);

	/*
	 * From this point on, we may have resources that need to be freed.
	 */

	if ((error = exec_extract_strings(imgp)) != 0)
		goto fail;

	exec_new_vmspace(imgp);

	/*
	 * Yeah, I'm paranoid.  There is every reason in the world to get
	 * VTEXT now since from here on out, there are places we can have
	 * a context switch.  Better safe than sorry; I really don't want
	 * the file to change while it's being loaded.
	 */
	lwkt_gettoken(&imgp->vp->v_interlock);
	imgp->vp->v_flag |= VTEXT;
	lwkt_reltoken(&imgp->vp->v_interlock);

	vmspace = imgp->proc->p_vmspace;

	for (i = 0; i < hdr->e_phnum; i++) {
		switch (phdr[i].p_type) {

		case PT_LOAD:	/* Loadable segment */
			prot = 0;
			if (phdr[i].p_flags & PF_X)
				prot |= VM_PROT_EXECUTE;
			if (phdr[i].p_flags & PF_W)
				prot |= VM_PROT_WRITE;
			if (phdr[i].p_flags & PF_R)
				prot |= VM_PROT_READ;

			if ((error = elf_load_section(imgp->proc,
						      vmspace, imgp->vp,
						      phdr[i].p_offset,
						      (caddr_t)phdr[i].p_vaddr,
						      phdr[i].p_memsz,
						      phdr[i].p_filesz,
						      prot)) != 0)
				goto fail;

			seg_addr = trunc_page(phdr[i].p_vaddr);
			seg_size = round_page(phdr[i].p_memsz +
			    phdr[i].p_vaddr - seg_addr);

			/*
			 * Is this .text or .data?  We can't use
			 * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
			 * alpha terribly and possibly does other bad
			 * things so we stick to the old way of figuring
			 * it out:  If the segment contains the program
			 * entry point, it's a text segment, otherwise it
			 * is a data segment.
			 *
			 * Note that obreak() assumes that data_addr +
			 * data_size == end of data load area, and the ELF
			 * file format expects segments to be sorted by
			 * address.  If multiple data segments exist, the
			 * last one will be used.
			 */
			if (hdr->e_entry >= phdr[i].p_vaddr &&
			    hdr->e_entry < (phdr[i].p_vaddr +
			    phdr[i].p_memsz)) {
				text_size = seg_size;
				text_addr = seg_addr;
				entry = (u_long)hdr->e_entry;
			} else {
				data_size = seg_size;
				data_addr = seg_addr;
			}
			total_size += seg_size;

			/*
			 * Check limits.  It should be safe to check the
			 * limits after loading the segment since we do
			 * not actually fault in all the segment's pages.
			 */
			if (data_size >
			    imgp->proc->p_rlimit[RLIMIT_DATA].rlim_cur ||
			    text_size > maxtsiz ||
			    total_size >
			    imgp->proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
				error = ENOMEM;
				goto fail;
			}
			break;
		case PT_INTERP:	/* Path to interpreter */
			if (phdr[i].p_filesz > MAXPATHLEN ||
			    phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE) {
				error = ENOEXEC;
				goto fail;
			}
			interp = imgp->image_header + phdr[i].p_offset;
			break;
		case PT_PHDR:	/* Program header table info */
			proghdr = phdr[i].p_vaddr;
			break;
		default:
			break;
		}
	}

	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
	vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;

	addr = ELF_RTLD_ADDR(vmspace);

	imgp->entry_addr = entry;

	brand_info = NULL;

	/* We support three types of branding -- (1) the ELF EI_OSABI field
	 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
	 * branding w/in the ELF header, and (3) path of the `interp_path'
	 * field.  We should also look for an ".note.ABI-tag" ELF section now
	 * in all Linux ELF binaries, FreeBSD 4.1+, and some NetBSD ones.
	 */

	/* If the executable has a brand, search for it in the brand list. */
	if (brand_info == NULL) {
		for (i = 0; i < MAX_BRANDS; i++) {
			Elf_Brandinfo *bi = elf_brand_list[i];

			if (bi != NULL &&
			    (hdr->e_ident[EI_OSABI] == bi->brand
			    || 0 ==
			    strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
			    bi->compat_3_brand, strlen(bi->compat_3_brand)))) {
				brand_info = bi;
				break;
			}
		}
	}

	/* Lacking a known brand, search for a recognized interpreter. */
	if (brand_info == NULL && interp != NULL) {
		for (i = 0; i < MAX_BRANDS; i++) {
			Elf_Brandinfo *bi = elf_brand_list[i];

			if (bi != NULL &&
			    strcmp(interp, bi->interp_path) == 0) {
				brand_info = bi;
				break;
			}
		}
	}

	/* Lacking a recognized interpreter, try the default brand */
	if (brand_info == NULL) {
		for (i = 0; i < MAX_BRANDS; i++) {
			Elf_Brandinfo *bi = elf_brand_list[i];

			if (bi != NULL && fallback_elf_brand == bi->brand) {
				brand_info = bi;
				break;
			}
		}
	}

	if (brand_info == NULL) {
		uprintf("ELF binary type \"%u\" not known.\n",
		    hdr->e_ident[EI_OSABI]);
		error = ENOEXEC;
		goto fail;
	}

	imgp->proc->p_sysent = brand_info->sysvec;
	if (interp != NULL) {
		path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
		snprintf(path, MAXPATHLEN, "%s%s",
			 brand_info->emul_path, interp);
		if ((error = elf_load_file(imgp->proc, path, &addr,
					   &imgp->entry_addr)) != 0) {
			if ((error = elf_load_file(imgp->proc, interp, &addr,
						   &imgp->entry_addr)) != 0) {
				uprintf("ELF interpreter %s not found\n", path);
				free(path, M_TEMP);
				goto fail;
			}
		}
		free(path, M_TEMP);
	}

	/*
	 * Construct auxargs table (used by the fixup routine)
	 */
	elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
	elf_auxargs->execfd = -1;
	elf_auxargs->phdr = proghdr;
	elf_auxargs->phent = hdr->e_phentsize;
	elf_auxargs->phnum = hdr->e_phnum;
	elf_auxargs->pagesz = PAGE_SIZE;
	elf_auxargs->base = addr;
	elf_auxargs->flags = 0;
	elf_auxargs->entry = entry;
	elf_auxargs->trace = elf_trace;

	imgp->auxargs = elf_auxargs;
	imgp->interpreted = 0;

fail:
	return error;
}

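/*
 * Stack fixup run at exec time: emit the ELF auxiliary vector built by
 * exec_elf_imgact() just above the argument and environment pointers on
 * the new process stack, then push argc.
 */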
static int
elf_freebsd_fixup(register_t **stack_base, struct image_params *imgp)
{
	Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
	register_t *pos;

	pos = *stack_base + (imgp->argc + imgp->envc + 2);

	if (args->trace) {
		AUXARGS_ENTRY(pos, AT_DEBUG, 1);
	}
	if (args->execfd != -1) {
		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
	}
	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
	AUXARGS_ENTRY(pos, AT_BASE, args->base);
	AUXARGS_ENTRY(pos, AT_NULL, 0);

	free(imgp->auxargs, M_TEMP);
	imgp->auxargs = NULL;

	(*stack_base)--;
	suword(*stack_base, (long) imgp->argc);
	return 0;
}

/*
 * Code for generating ELF core dumps.
 */

typedef void (*segment_callback) (vm_map_entry_t, void *);

/* Closure for cb_put_phdr(). */
struct phdr_closure {
	Elf_Phdr *phdr;		/* Program header to fill in */
	Elf_Off offset;		/* Offset of segment in core file */
};

/* Closure for cb_size_segment(). */
struct sseg_closure {
	int count;		/* Count of writable segments. */
	size_t size;		/* Total size of all writable segments. */
};

static void cb_put_phdr (vm_map_entry_t, void *);
static void cb_size_segment (vm_map_entry_t, void *);
static void each_writable_segment (struct proc *, segment_callback,
    void *);
static int elf_corehdr (struct proc *, struct vnode *, struct ucred *,
    int, void *, size_t);
static void elf_puthdr (struct proc *, void *, size_t *,
    const prstatus_t *, const prfpregset_t *, const prpsinfo_t *, int);
static void elf_putnote (void *, size_t *, const char *, int,
    const void *, size_t);

extern int osreldate;

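/*
 * Write an ELF core dump of process p to the vnode vp.  The header area
 * is sized with a dry run of elf_puthdr(), and the dump is refused with
 * EFAULT if the header plus all writable segments would exceed limit.
 */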
int
elf_coredump(p, vp, limit)
	struct proc *p;
	struct vnode *vp;
	off_t limit;
{
	struct ucred *cred = p->p_ucred;
	struct thread *td = p->p_thread;
	int error = 0;
	struct sseg_closure seginfo;
	void *hdr;
	size_t hdrsize;

	/* Size the program segments. */
	seginfo.count = 0;
	seginfo.size = 0;
	each_writable_segment(p, cb_size_segment, &seginfo);

	/*
	 * Calculate the size of the core file header area by making
	 * a dry run of generating it.  Nothing is written, but the
	 * size is calculated.
	 */
	hdrsize = 0;
	elf_puthdr((struct proc *)NULL, (void *)NULL, &hdrsize,
	    (const prstatus_t *)NULL, (const prfpregset_t *)NULL,
	    (const prpsinfo_t *)NULL, seginfo.count);

	if (hdrsize + seginfo.size >= limit)
		return (EFAULT);

	/*
	 * Allocate memory for building the header, fill it up,
	 * and write it out.
	 */
	hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
	if (hdr == NULL) {
		return EINVAL;
	}
	error = elf_corehdr(p, vp, cred, seginfo.count, hdr, hdrsize);

	/* Write the contents of all of the writable segments. */
	if (error == 0) {
		Elf_Phdr *php;
		off_t offset;
		int i;

		php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
		offset = hdrsize;
		for (i = 0; i < seginfo.count; i++) {
			error = vn_rdwr_inchunks(UIO_WRITE, vp,
			    (caddr_t)php->p_vaddr,
			    php->p_filesz, offset, UIO_USERSPACE,
			    IO_UNIT | IO_DIRECT | IO_CORE, cred,
			    (int *)NULL, td);
			if (error != 0)
				break;
			offset += php->p_filesz;
			php++;
		}
	}
	free(hdr, M_TEMP);

	return error;
}

/*
 * A callback for each_writable_segment() to write out the segment's
 * program header entry.
 */
static void
cb_put_phdr(entry, closure)
	vm_map_entry_t entry;
	void *closure;
{
	struct phdr_closure *phc = (struct phdr_closure *)closure;
	Elf_Phdr *phdr = phc->phdr;

	phc->offset = round_page(phc->offset);

	phdr->p_type = PT_LOAD;
	phdr->p_offset = phc->offset;
	phdr->p_vaddr = entry->start;
	phdr->p_paddr = 0;
	phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
	phdr->p_align = PAGE_SIZE;
	phdr->p_flags = 0;
	if (entry->protection & VM_PROT_READ)
		phdr->p_flags |= PF_R;
	if (entry->protection & VM_PROT_WRITE)
		phdr->p_flags |= PF_W;
	if (entry->protection & VM_PROT_EXECUTE)
		phdr->p_flags |= PF_X;

	phc->offset += phdr->p_filesz;
	phc->phdr++;
}

/*
 * A callback for each_writable_segment() to gather information about
 * the number of segments and their total size.
 */
static void
cb_size_segment(entry, closure)
	vm_map_entry_t entry;
	void *closure;
{
	struct sseg_closure *ssc = (struct sseg_closure *)closure;

	ssc->count++;
	ssc->size += entry->end - entry->start;
}

/*
 * For each writable segment in the process's memory map, call the given
 * function with a pointer to the map entry and some arbitrary
 * caller-supplied data.
 */
static void
each_writable_segment(p, func, closure)
	struct proc *p;
	segment_callback func;
	void *closure;
{
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_map_entry_t entry;

	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		vm_object_t obj;

		/*
		 * Don't dump inaccessible mappings, deal with legacy
		 * coredump mode.
		 *
		 * Note that read-only segments related to the elf binary
		 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
		 * need to arbitrarily ignore such segments.
		 */
		if (elf_legacy_coredump) {
			if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
				continue;
		} else {
			if ((entry->protection & VM_PROT_ALL) == 0)
				continue;
		}

		/*
		 * Don't include a memory segment in the coredump if
		 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
		 * madvise(2).  Do not dump submaps (i.e. parts of the
		 * kernel map).
		 */
		if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
			continue;

		if ((obj = entry->object.vm_object) == NULL)
			continue;

		/* Find the deepest backing object. */
		while (obj->backing_object != NULL)
			obj = obj->backing_object;

		/* Ignore memory-mapped devices and such things. */
		if (obj->type != OBJT_DEFAULT &&
		    obj->type != OBJT_SWAP &&
		    obj->type != OBJT_VNODE)
			continue;

		(*func)(entry, closure);
	}
}

/*
 * Write the core file header to the file, including padding up to
 * the page boundary.
 */
static int
elf_corehdr(p, vp, cred, numsegs, hdr, hdrsize)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
	int numsegs;
	size_t hdrsize;
	void *hdr;
{
	struct {
		prstatus_t status;
		prfpregset_t fpregset;
		prpsinfo_t psinfo;
	} *tempdata;
	size_t off;
	prstatus_t *status;
	prfpregset_t *fpregset;
	prpsinfo_t *psinfo;
	struct thread *td = p->p_thread;

	tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO | M_WAITOK);
	status = &tempdata->status;
	fpregset = &tempdata->fpregset;
	psinfo = &tempdata->psinfo;

	/* Gather the information for the header. */
	status->pr_version = PRSTATUS_VERSION;
	status->pr_statussz = sizeof(prstatus_t);
	status->pr_gregsetsz = sizeof(gregset_t);
	status->pr_fpregsetsz = sizeof(fpregset_t);
	status->pr_osreldate = osreldate;
	status->pr_cursig = p->p_sig;
	status->pr_pid = p->p_pid;
	fill_regs(p, &status->pr_reg);

	fill_fpregs(p, fpregset);

	psinfo->pr_version = PRPSINFO_VERSION;
	psinfo->pr_psinfosz = sizeof(prpsinfo_t);
	strncpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname) - 1);

	/* XXX - We don't fill in the command line arguments properly yet. */
	strncpy(psinfo->pr_psargs, p->p_comm, PRARGSZ);

	/* Fill in the header. */
	bzero(hdr, hdrsize);
	off = 0;
	elf_puthdr(p, hdr, &off, status, fpregset, psinfo, numsegs);

	free(tempdata, M_TEMP);

	/* Write it to the core file. */
	return vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
	    UIO_SYSSPACE, IO_UNIT | IO_DIRECT | IO_CORE, cred, NULL, td);
}

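/*
 * Lay out the core file header: ELF header, program headers, and the
 * note segment.  With dst == NULL only *off is advanced, which is how
 * elf_coredump() sizes the header before filling it in for real.
 */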
static void
elf_puthdr(struct proc *p, void *dst, size_t *off, const prstatus_t *status,
    const prfpregset_t *fpregset, const prpsinfo_t *psinfo, int numsegs)
{
	size_t ehoff;
	size_t phoff;
	size_t noteoff;
	size_t notesz;

	ehoff = *off;
	*off += sizeof(Elf_Ehdr);

	phoff = *off;
	*off += (numsegs + 1) * sizeof(Elf_Phdr);

	noteoff = *off;
	elf_putnote(dst, off, "FreeBSD", NT_PRSTATUS, status,
	    sizeof *status);
	elf_putnote(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
	    sizeof *fpregset);
	elf_putnote(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
	    sizeof *psinfo);
	notesz = *off - noteoff;

	/* Align up to a page boundary for the program segments. */
	*off = round_page(*off);

	if (dst != NULL) {
		Elf_Ehdr *ehdr;
		Elf_Phdr *phdr;
		struct phdr_closure phc;

		/*
		 * Fill in the ELF header.
		 */
		ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
		ehdr->e_ident[EI_MAG0] = ELFMAG0;
		ehdr->e_ident[EI_MAG1] = ELFMAG1;
		ehdr->e_ident[EI_MAG2] = ELFMAG2;
		ehdr->e_ident[EI_MAG3] = ELFMAG3;
		ehdr->e_ident[EI_CLASS] = ELF_CLASS;
		ehdr->e_ident[EI_DATA] = ELF_DATA;
		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
		ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
		ehdr->e_ident[EI_ABIVERSION] = 0;
		ehdr->e_ident[EI_PAD] = 0;
		ehdr->e_type = ET_CORE;
		ehdr->e_machine = ELF_ARCH;
		ehdr->e_version = EV_CURRENT;
		ehdr->e_entry = 0;
		ehdr->e_phoff = phoff;
		ehdr->e_flags = 0;
		ehdr->e_ehsize = sizeof(Elf_Ehdr);
		ehdr->e_phentsize = sizeof(Elf_Phdr);
		ehdr->e_phnum = numsegs + 1;
		ehdr->e_shentsize = sizeof(Elf_Shdr);
		ehdr->e_shnum = 0;
		ehdr->e_shstrndx = SHN_UNDEF;

		/*
		 * Fill in the program header entries.
		 */
		phdr = (Elf_Phdr *)((char *)dst + phoff);

		/* The note segment. */
		phdr->p_type = PT_NOTE;
		phdr->p_offset = noteoff;
		phdr->p_vaddr = 0;
		phdr->p_paddr = 0;
		phdr->p_filesz = notesz;
		phdr->p_memsz = 0;
		phdr->p_flags = 0;
		phdr->p_align = 0;
		phdr++;

		/* All the writable segments from the program. */
		phc.phdr = phdr;
		phc.offset = *off;
		each_writable_segment(p, cb_put_phdr, &phc);
	}
}

static void
elf_putnote(void *dst, size_t *off, const char *name, int type,
    const void *desc, size_t descsz)
{
	Elf_Note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = descsz;
	note.n_type = type;
	if (dst != NULL)
		bcopy(&note, (char *)dst + *off, sizeof note);
	*off += sizeof note;
	if (dst != NULL)
		bcopy(name, (char *)dst + *off, note.n_namesz);
	*off += roundup2(note.n_namesz, sizeof(Elf_Size));
	if (dst != NULL)
		bcopy(desc, (char *)dst + *off, note.n_descsz);
	*off += roundup2(note.n_descsz, sizeof(Elf_Size));
}

/*
 * Tell kern_execve.c about it, with a little help from the linker.
 */
static struct execsw elf_execsw = {exec_elf_imgact, "ELF"};
EXEC_SET(elf, elf_execsw);