/*-
 * Copyright 1996-1998 John D. Polstra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/mman.h>
#include <sys/stat.h>

#include <errno.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "debug.h"
#include "rtld.h"

static Elf_Ehdr *get_elf_header(int, const char *);
static int convert_prot(int);   /* Elf flags -> mmap protection */
static int convert_flags(int);  /* Elf flags -> mmap flags */

/*
 * Map a shared object into memory.  The "fd" argument is a file descriptor,
 * which must be open on the object and positioned at its beginning.
 * The "path" argument is a pathname that is used only for error messages.
 *
 * The return value is a pointer to a newly-allocated Obj_Entry structure
 * for the shared object.  Returns NULL on failure.
 */
Obj_Entry *
map_object(int fd, const char *path, const struct stat *sb)
{
    Obj_Entry *obj;
    Elf_Ehdr *hdr;
    int i;
    Elf_Phdr *phdr;
    Elf_Phdr *phlimit;
    Elf_Phdr **segs;
    int nsegs;
    Elf_Phdr *phdyn;
    Elf_Phdr *phinterp;
    Elf_Phdr *phtls;
    caddr_t mapbase;
    caddr_t shlib_base;
    size_t mapsize;
    Elf_Addr base_vaddr;
    Elf_Addr base_vlimit;
    caddr_t base_addr;
    Elf_Off data_offset;
    Elf_Addr data_vaddr;
    Elf_Addr data_vlimit;
    caddr_t data_addr;
    int data_prot;
    int data_flags;
    Elf_Addr clear_vaddr;
    caddr_t clear_addr;
    caddr_t clear_page;
    Elf_Addr phdr_vaddr;
    size_t nclear, phsize;
    Elf_Addr bss_vaddr;
    Elf_Addr bss_vlimit;
    caddr_t bss_addr;
    Elf_Word stack_flags;
    Elf_Addr relro_page;
    size_t relro_size;
    Elf_Addr note_start;
    Elf_Addr note_end;

    hdr = get_elf_header(fd, path);
    if (hdr == NULL)
        return (NULL);

    if (__ld_sharedlib_base) {
        shlib_base = (void *)(intptr_t)strtoul(__ld_sharedlib_base, NULL, 0);
    } else {
        shlib_base = NULL;
    }

    /*
     * Scan the program header entries, and save key information.
     *
     * We expect that the loadable segments are ordered by load address.
     */
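    /*
     * The scan below fills segs[] (alloca'd with room for e_phnum entries;
     * nsegs starts at -1 so segs[++nsegs] indexes from 0) and records the
     * PT_DYNAMIC, PT_INTERP, and PT_TLS headers along with the stack and
     * RELRO parameters.  PT_NOTE segments are honored only when they lie
     * entirely within the first page, since that is all of the file that
     * get_elf_header() has mapped.
     */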
    phdr = (Elf_Phdr *)((char *)hdr + hdr->e_phoff);
    phsize = hdr->e_phnum * sizeof(phdr[0]);
    phlimit = phdr + hdr->e_phnum;
    nsegs = -1;
    phdyn = phinterp = phtls = NULL;
    phdr_vaddr = 0;
    relro_page = 0;
    relro_size = 0;
    note_start = 0;
    note_end = 0;
    segs = alloca(sizeof(segs[0]) * hdr->e_phnum);
    stack_flags = RTLD_DEFAULT_STACK_PF_EXEC | PF_R | PF_W;
    while (phdr < phlimit) {
        switch (phdr->p_type) {

        case PT_INTERP:
            phinterp = phdr;
            break;

        case PT_LOAD:
            segs[++nsegs] = phdr;
            if ((segs[nsegs]->p_align & (PAGE_SIZE - 1)) != 0) {
                _rtld_error("%s: PT_LOAD segment %d not page-aligned",
                    path, nsegs);
                goto error;
            }
            break;

        case PT_PHDR:
            phdr_vaddr = phdr->p_vaddr;
            phsize = phdr->p_memsz;
            break;

        case PT_DYNAMIC:
            phdyn = phdr;
            break;

        case PT_TLS:
            phtls = phdr;
            break;

        case PT_GNU_STACK:
            stack_flags = phdr->p_flags;
            break;

        case PT_GNU_RELRO:
            relro_page = phdr->p_vaddr;
            relro_size = phdr->p_memsz;
            break;

        case PT_NOTE:
            if (phdr->p_offset > PAGE_SIZE ||
                phdr->p_offset + phdr->p_filesz > PAGE_SIZE)
                break;
            note_start = (Elf_Addr)(char *)hdr + phdr->p_offset;
            note_end = note_start + phdr->p_filesz;
            break;
        }

        ++phdr;
    }
    if (phdyn == NULL) {
        _rtld_error("%s: object is not dynamically-linked", path);
        goto error;
    }

    if (nsegs < 0) {
        _rtld_error("%s: too few PT_LOAD segments", path);
        goto error;
    }

    /*
     * Map the entire address space of the object, to stake out our
     * contiguous region, and to establish the base address for relocation.
     */
    base_vaddr = trunc_page(segs[0]->p_vaddr);
    base_vlimit = round_page(segs[nsegs]->p_vaddr + segs[nsegs]->p_memsz);
    mapsize = base_vlimit - base_vaddr;
    base_addr = (caddr_t)base_vaddr;

    if (base_addr == NULL && shlib_base) {
        size_t limit = 1024 * 256 * 1024;
        size_t offset;

        for (offset = 0; offset < limit; offset += 256 * 1024) {
            mapbase = mmap(shlib_base + offset, mapsize,
                           PROT_NONE,
                           MAP_ANON | MAP_PRIVATE | MAP_NOCORE |
                           MAP_TRYFIXED,
                           -1, 0);
            if (mapbase != MAP_FAILED)
                break;
        }
    } else {
        mapbase = mmap(base_addr, mapsize,
                       PROT_NONE,
                       MAP_ANON | MAP_PRIVATE | MAP_NOCORE,
                       -1, 0);
    }
    if (mapbase == (caddr_t)-1) {
        _rtld_error("%s: mmap of entire address space failed: %s",
            path, rtld_strerror(errno));
        goto error;
    }
    if (base_addr != NULL && mapbase != base_addr) {
        _rtld_error("%s: mmap returned wrong address: wanted %p, got %p",
            path, base_addr, mapbase);
        goto error1;
    }

    for (i = 0; i <= nsegs; i++) {
        /* Overlay the segment onto the proper region. */
        data_offset = trunc_page(segs[i]->p_offset);
        data_vaddr = trunc_page(segs[i]->p_vaddr);
        data_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_filesz);
        data_addr = mapbase + (data_vaddr - base_vaddr);
        data_prot = convert_prot(segs[i]->p_flags);
        data_flags = convert_flags(segs[i]->p_flags) | MAP_FIXED;
        if (mmap(data_addr, data_vlimit - data_vaddr, data_prot,
            data_flags, fd, data_offset) == (caddr_t)-1) {
            _rtld_error("%s: mmap of data failed: %s", path,
                rtld_strerror(errno));
            goto error1;
        }
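
        /*
         * Worked example, assuming a 4 KiB PAGE_SIZE: a segment with
         * p_offset = p_vaddr = 0x1234 and p_filesz = 0x2000 gives
         * data_vaddr = trunc_page(0x1234) = 0x1000 and
         * data_vlimit = round_page(0x3234) = 0x4000, so the file mapping
         * covers [0x1000, 0x4000).  Bytes past p_vaddr + p_filesz in that
         * last page come from the file image, not zero fill, which is why
         * the BSS setup below must clear them explicitly.
         */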

        /* Do BSS setup */
        if (segs[i]->p_filesz != segs[i]->p_memsz) {

            /* Clear any BSS in the last page of the segment. */
            clear_vaddr = segs[i]->p_vaddr + segs[i]->p_filesz;
            clear_addr = mapbase + (clear_vaddr - base_vaddr);
            clear_page = mapbase + (trunc_page(clear_vaddr) - base_vaddr);

            if ((nclear = data_vlimit - clear_vaddr) > 0) {
                /* Make sure the end of the segment is writable */
                if ((data_prot & PROT_WRITE) == 0 &&
                    mprotect(clear_page, PAGE_SIZE,
                        data_prot | PROT_WRITE) == -1) {
                    _rtld_error("%s: mprotect failed: %s", path,
                        rtld_strerror(errno));
                    goto error1;
                }

                memset(clear_addr, 0, nclear);

                /*
                 * Reset the data protection back, and enable the segment
                 * to be coredumped, since we modified it.
                 */
                if ((data_prot & PROT_WRITE) == 0) {
                    madvise(clear_page, PAGE_SIZE, MADV_CORE);
                    mprotect(clear_page, PAGE_SIZE, data_prot);
                }
            }

            /* Overlay the BSS segment onto the proper region. */
            bss_vaddr = data_vlimit;
            bss_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_memsz);
            bss_addr = mapbase + (bss_vaddr - base_vaddr);
            if (bss_vlimit > bss_vaddr) {   /* There is something to do */
                if (mmap(bss_addr, bss_vlimit - bss_vaddr, data_prot,
                    data_flags | MAP_ANON, -1, 0) == (caddr_t)-1) {
                    _rtld_error("%s: mmap of bss failed: %s", path,
                        rtld_strerror(errno));
                    goto error1;
                }
            }
        }

        if (phdr_vaddr == 0 && data_offset <= hdr->e_phoff &&
            (data_vlimit - data_vaddr + data_offset) >=
            (hdr->e_phoff + hdr->e_phnum * sizeof(Elf_Phdr))) {
            phdr_vaddr = data_vaddr + hdr->e_phoff - data_offset;
        }
    }

    obj = obj_new();
    if (sb != NULL) {
        obj->dev = sb->st_dev;
        obj->ino = sb->st_ino;
    }
    obj->mapbase = mapbase;
    obj->mapsize = mapsize;
    obj->textsize = round_page(segs[0]->p_vaddr + segs[0]->p_memsz) -
        base_vaddr;
    obj->vaddrbase = base_vaddr;
    obj->relocbase = mapbase - base_vaddr;
    obj->dynamic = (const Elf_Dyn *)(obj->relocbase + phdyn->p_vaddr);
    if (hdr->e_entry != 0)
        obj->entry = (caddr_t)(obj->relocbase + hdr->e_entry);
    if (phdr_vaddr != 0) {
        obj->phdr = (const Elf_Phdr *)(obj->relocbase + phdr_vaddr);
    } else {
        obj->phdr = malloc(phsize);
        if (obj->phdr == NULL) {
            obj_free(obj);
            _rtld_error("%s: cannot allocate program header", path);
            goto error1;
        }
        memcpy((char *)obj->phdr, (char *)hdr + hdr->e_phoff, phsize);
        obj->phdr_alloc = true;
    }
    obj->phsize = phsize;
    if (phinterp != NULL)
        obj->interp = (const char *)(obj->relocbase + phinterp->p_vaddr);
    if (phtls != NULL) {
        tls_dtv_generation++;
        obj->tlsindex = ++tls_max_index;
        obj->tlssize = phtls->p_memsz;
        obj->tlsalign = phtls->p_align;
        obj->tlsinitsize = phtls->p_filesz;
        obj->tlsinit = mapbase + phtls->p_vaddr;
    }
    obj->stack_flags = stack_flags;
    if (relro_size) {
        obj->relro_page = obj->relocbase + trunc_page(relro_page);
        obj->relro_size = round_page(relro_size);
    }
    if (note_start < note_end)
        digest_notes(obj, note_start, note_end);
    munmap(hdr, PAGE_SIZE);
    return (obj);

error1:
    munmap(mapbase, mapsize);
error:
    munmap(hdr, PAGE_SIZE);
    return (NULL);
}
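
/*
 * A minimal usage sketch, not part of this file: in rtld proper the caller
 * (do_load_object() in rtld.c) opens the object, fstat()s it, and passes
 * the descriptor, still positioned at the start of the file, to
 * map_object().  The helper below is hypothetical and shown for
 * illustration only, which is why it is compiled out.
 */
#if 0
#include <fcntl.h>

static Obj_Entry *
example_map_object(const char *path)
{
    struct stat sb;
    Obj_Entry *obj;
    int fd;

    if ((fd = open(path, O_RDONLY)) == -1)
        return (NULL);
    if (fstat(fd, &sb) == -1) {
        close(fd);
        return (NULL);
    }
    obj = map_object(fd, path, &sb);    /* fd must be at offset 0 */
    close(fd);                          /* the mappings outlive the fd */
    return (obj);
}
#endif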

static Elf_Ehdr *
get_elf_header(int fd, const char *path)
{
    Elf_Ehdr *hdr;

    /* DragonFly mmap does not have MAP_PREFAULT_READ */
    hdr = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, fd, 0);
    if (hdr == (Elf_Ehdr *)MAP_FAILED) {
        _rtld_error("%s: read error: %s", path, rtld_strerror(errno));
        return (NULL);
    }

    /* Make sure the file is valid */
    if (!IS_ELF(*hdr)) {
        _rtld_error("%s: invalid file format", path);
        goto error;
    }
    if (hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
        hdr->e_ident[EI_DATA] != ELF_TARG_DATA) {
        _rtld_error("%s: unsupported file layout", path);
        goto error;
    }
    if (hdr->e_ident[EI_VERSION] != EV_CURRENT ||
        hdr->e_version != EV_CURRENT) {
        _rtld_error("%s: unsupported file version", path);
        goto error;
    }
    if (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN) {
        _rtld_error("%s: unsupported file type", path);
        goto error;
    }
    if (hdr->e_machine != ELF_TARG_MACH) {
        _rtld_error("%s: unsupported machine", path);
        goto error;
    }

    /*
     * We rely on the program header being in the first page.  This is
     * not strictly required by the ABI specification, but it seems to
     * always be true in practice.  And, it simplifies things considerably.
     */
    if (hdr->e_phentsize != sizeof(Elf_Phdr)) {
        _rtld_error(
            "%s: invalid shared object: e_phentsize != sizeof(Elf_Phdr)",
            path);
        goto error;
    }
    if (hdr->e_phoff + hdr->e_phnum * sizeof(Elf_Phdr) >
        (size_t)PAGE_SIZE) {
        _rtld_error("%s: program header too large", path);
        goto error;
    }
    return (hdr);

error:
    munmap(hdr, PAGE_SIZE);
    return (NULL);
}

void
obj_free(Obj_Entry *obj)
{
    Objlist_Entry *elm;

    if (obj->tls_done)
        free_tls_offset(obj);
    while (obj->needed != NULL) {
        Needed_Entry *needed = obj->needed;
        obj->needed = needed->next;
        free(needed);
    }
    while (!STAILQ_EMPTY(&obj->names)) {
        Name_Entry *entry = STAILQ_FIRST(&obj->names);
        STAILQ_REMOVE_HEAD(&obj->names, link);
        free(entry);
    }
    while (!STAILQ_EMPTY(&obj->dldags)) {
        elm = STAILQ_FIRST(&obj->dldags);
        STAILQ_REMOVE_HEAD(&obj->dldags, link);
        free(elm);
    }
    while (!STAILQ_EMPTY(&obj->dagmembers)) {
        elm = STAILQ_FIRST(&obj->dagmembers);
        STAILQ_REMOVE_HEAD(&obj->dagmembers, link);
        free(elm);
    }
    if (obj->vertab)
        free(obj->vertab);
    if (obj->origin_path)
        free(obj->origin_path);
    if (obj->z_origin)
        free(obj->rpath);
    if (obj->priv)
        free(obj->priv);
    if (obj->path)
        free(obj->path);
    if (obj->phdr_alloc)
        free((void *)obj->phdr);
    free(obj);
}

Obj_Entry *
obj_new(void)
{
    Obj_Entry *obj;

    obj = CNEW(Obj_Entry);
    STAILQ_INIT(&obj->dldags);
    STAILQ_INIT(&obj->dagmembers);
    STAILQ_INIT(&obj->names);
    return obj;
}

/*
 * Given a set of ELF protection flags, return the corresponding protection
 * flags for MMAP.
 */
static int
convert_prot(int elfflags)
{
    int prot = 0;

    if (elfflags & PF_R)
        prot |= PROT_READ;
    if (elfflags & PF_W)
        prot |= PROT_WRITE;
    if (elfflags & PF_X)
        prot |= PROT_EXEC;
    return prot;
}

static int
convert_flags(int elfflags)
{
    int flags = MAP_PRIVATE;    /* All mappings are private */

    /*
     * Readonly mappings are marked "MAP_NOCORE", because they can be
     * reconstructed by a debugger.
     */
    if (!(elfflags & PF_W))
        flags |= MAP_NOCORE;
    return flags;
}
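
/*
 * Worked example of the two conversions above: a typical text segment with
 * p_flags = PF_R | PF_X yields convert_prot() = PROT_READ | PROT_EXEC and
 * convert_flags() = MAP_PRIVATE | MAP_NOCORE, while a writable data segment
 * (PF_R | PF_W) stays plain MAP_PRIVATE, so it is included in core dumps.
 */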