1 /* $OpenBSD: loader.c,v 1.223 2024/01/22 02:08:31 deraadt Exp $ */
2
3 /*
4 * Copyright (c) 1998 Per Fogelstrom, Opsycon AB
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
16 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
19 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 */
28
29 #define _DYN_LOADER
30
31 #include <sys/types.h>
32 #include <sys/mman.h>
33 #include <sys/syscall.h>
34 #include <sys/exec.h>
35 #ifdef __i386__
36 # include <machine/vmparam.h>
37 #endif
38 #include <string.h>
39 #include <link.h>
40 #include <limits.h> /* NAME_MAX */
41 #include <dlfcn.h>
42 #include <tib.h>
43
44 #include "syscall.h"
45 #include "util.h"
46 #include "resolve.h"
47 #include "path.h"
48 #include "sod.h"
49
50 /*
51 * Local decls.
52 */
53 unsigned long _dl_boot(const char **, char **, const long, long *) __boot;
54 void _dl_debug_state(void);
55 void _dl_setup_env(const char *_argv0, char **_envp) __boot;
56 void _dl_dtors(void);
57 void _dl_dopreload(char *_paths) __boot;
58 void _dl_fixup_user_env(void) __boot;
59 void _dl_call_preinit(elf_object_t *) __boot;
60 void _dl_call_init_recurse(elf_object_t *object, int initfirst);
61 void _dl_clean_boot(void);
62 static inline void unprotect_if_textrel(elf_object_t *_object);
63 static inline void reprotect_if_textrel(elf_object_t *_object);
64 static void _dl_rreloc(elf_object_t *_object);
65
66 int _dl_pagesz __relro = 4096;
67 int _dl_bindnow __relro = 0;
68 int _dl_debug __relro = 0;
69 int _dl_trust __relro = 0;
70 char **_dl_libpath __relro = NULL;
71 const char **_dl_argv __relro = NULL;
72 int _dl_argc __relro = 0;
73 const char *_dl_libcname;
74
75 char *_dl_preload __boot_data = NULL;
76 char *_dl_tracefmt1 __boot_data = NULL;
77 char *_dl_tracefmt2 __boot_data = NULL;
78 char *_dl_traceprog __boot_data = NULL;
79 void *_dl_exec_hint __boot_data = NULL;
80
81 char **environ = NULL;
82 char *__progname = NULL;
83
84 int _dl_traceld;
85 struct r_debug *_dl_debug_map;
86
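/*
 * Version 0 of the callback table handed out to libc: the preinit/init
 * functions below receive &_dl_cb_cb as their fourth argument and call
 * it to obtain this table (see _dl_cb_cb() near the end of this file).
 */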
87 static dl_cb_cb _dl_cb_cb;
88 const struct dl_cb_0 callbacks_0 = {
89 .dl_allocate_tib = &_dl_allocate_tib,
90 .dl_free_tib = &_dl_free_tib,
91 #if DO_CLEAN_BOOT
92 .dl_clean_boot = &_dl_clean_boot,
93 #endif
94 .dlopen = &dlopen,
95 .dlclose = &dlclose,
96 .dlsym = &dlsym,
97 .dladdr = &dladdr,
98 .dlctl = &dlctl,
99 .dlerror = &dlerror,
100 .dl_iterate_phdr = &dl_iterate_phdr,
101 };
102
103
104 /*
105 * Run dtors for a single object.
106 */
107 void
108 _dl_run_dtors(elf_object_t *obj)
109 {
110 if (obj->dyn.fini_array) {
111 int num = obj->dyn.fini_arraysz / sizeof(Elf_Addr);
112 int i;
113
114 DL_DEB(("doing finiarray obj %p @%p: [%s]\n",
115 obj, obj->dyn.fini_array, obj->load_name));
116 for (i = num; i > 0; i--)
117 (*obj->dyn.fini_array[i-1])();
118 }
119
120 if (obj->dyn.fini) {
121 DL_DEB(("doing dtors obj %p @%p: [%s]\n",
122 obj, obj->dyn.fini, obj->load_name));
123 (*obj->dyn.fini)();
124 }
125 }
126
127 /*
128 * Run dtors for all objects that are eligible.
129 */
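/*
 * Rough shape of the ordering below: repeatedly (1) mark every
 * finalizable object (refcount 0, init done, fini not yet done) as
 * FINI_READY, (2) clear FINI_READY on any object that another pending
 * object still depends on, so dependents are finalized before their
 * dependencies, and (3) run the dtors of whatever is still ready.
 * Iterate until a pass makes no progress; DF_1_INITFIRST objects are
 * held back until everything else has been finalized.
 */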
130 void
131 _dl_run_all_dtors(void)
132 {
133 elf_object_t *node;
134 int fini_complete;
135 int skip_initfirst;
136 int initfirst_skipped;
137
138 fini_complete = 0;
139 skip_initfirst = 1;
140 initfirst_skipped = 0;
141
142 while (fini_complete == 0) {
143 fini_complete = 1;
144 for (node = _dl_objects;
145 node != NULL;
146 node = node->next) {
147 if ((node->dyn.fini || node->dyn.fini_array) &&
148 (OBJECT_REF_CNT(node) == 0) &&
149 (node->status & STAT_INIT_DONE) &&
150 ((node->status & STAT_FINI_DONE) == 0)) {
151 if (skip_initfirst &&
152 (node->obj_flags & DF_1_INITFIRST))
153 initfirst_skipped = 1;
154 else
155 node->status |= STAT_FINI_READY;
156 }
157 }
158 for (node = _dl_objects;
159 node != NULL;
160 node = node->next) {
161 if ((node->dyn.fini || node->dyn.fini_array) &&
162 (OBJECT_REF_CNT(node) == 0) &&
163 (node->status & STAT_INIT_DONE) &&
164 ((node->status & STAT_FINI_DONE) == 0) &&
165 (!skip_initfirst ||
166 (node->obj_flags & DF_1_INITFIRST) == 0)) {
167 struct object_vector vec = node->child_vec;
168 int i;
169
170 for (i = 0; i < vec.len; i++)
171 vec.vec[i]->status &= ~STAT_FINI_READY;
172 }
173 }
174
175 for (node = _dl_objects;
176 node != NULL;
177 node = node->next) {
178 if (node->status & STAT_FINI_READY) {
179 fini_complete = 0;
180 node->status |= STAT_FINI_DONE;
181 node->status &= ~STAT_FINI_READY;
182 _dl_run_dtors(node);
183 }
184 }
185
186 if (fini_complete && initfirst_skipped)
187 fini_complete = initfirst_skipped = skip_initfirst = 0;
188 }
189 }
190
191 /*
192 * Routine to walk through all of the objects except the first
193 * (main executable).
194 *
195 * Open question: should dlopen()ed objects be unloaded before or after
196 * the main application's destructors run?
197 */
198 void
199 _dl_dtors(void)
200 {
201 _dl_thread_kern_stop();
202
203 /* ORDER? */
204 _dl_unload_dlopen();
205
206 DL_DEB(("doing dtors\n"));
207
208 _dl_objects->opencount--;
209 _dl_notify_unload_shlib(_dl_objects);
210
211 _dl_run_all_dtors();
212 }
213
214 #if DO_CLEAN_BOOT
215 void
216 _dl_clean_boot(void)
217 {
218 extern char boot_text_start[], boot_text_end[];
219 #if 0 /* XXX breaks boehm-gc?!? */
220 extern char boot_data_start[], boot_data_end[];
221 #endif
222
223 _dl_mmap(boot_text_start, boot_text_end - boot_text_start,
224 PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0);
225 _dl_mimmutable(boot_text_start, boot_text_end - boot_text_start);
226 #if 0 /* XXX breaks boehm-gc?!? */
227 _dl_mmap(boot_data_start, boot_data_end - boot_data_start,
228 PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0);
229 _dl_mimmutable(boot_data_start, boot_data_end - boot_data_start);
230 #endif
231 }
232 #endif /* DO_CLEAN_BOOT */
233
234 void
235 _dl_dopreload(char *paths)
236 {
237 char *cp, *dp;
238 elf_object_t *shlib;
239 int count;
240
241 dp = paths = _dl_strdup(paths);
242 if (dp == NULL)
243 _dl_oom();
244
245 /* preallocate child_vec for the LD_PRELOAD objects */
246 count = 1;
247 while (*dp++ != '\0')
248 if (*dp == ':')
249 count++;
250 object_vec_grow(&_dl_objects->child_vec, count);
251
252 dp = paths;
253 while ((cp = _dl_strsep(&dp, ":")) != NULL) {
254 shlib = _dl_load_shlib(cp, _dl_objects, OBJTYPE_LIB,
255 _dl_objects->obj_flags, 1);
256 if (shlib == NULL)
257 _dl_die("can't preload library '%s'", cp);
258 _dl_add_object(shlib);
259 _dl_link_child(shlib, _dl_objects);
260 }
261 _dl_free(paths);
262 return;
263 }
264
265 /*
266 * grab interesting environment variables, zap bad env vars if
267 * issetugid, and set the exported environ and __progname variables
268 */
269 void
270 _dl_setup_env(const char *argv0, char **envp)
271 {
272 static char progname_storage[NAME_MAX+1] = "";
273
274 /*
275 * Don't allow the library search paths to be changed when running
276 * a set-uid/set-gid program without sufficient privileges.
277 */
278 _dl_trust = !_dl_issetugid();
279 if (!_dl_trust) { /* Zap paths if s[ug]id... */
280 _dl_unsetenv("LD_DEBUG", envp);
281 _dl_unsetenv("LD_LIBRARY_PATH", envp);
282 _dl_unsetenv("LD_PRELOAD", envp);
283 _dl_unsetenv("LD_BIND_NOW", envp);
284 } else {
285 /*
286 * Get paths to various things we are going to use.
287 */
288 _dl_debug = _dl_getenv("LD_DEBUG", envp) != NULL;
289 _dl_libpath = _dl_split_path(_dl_getenv("LD_LIBRARY_PATH",
290 envp));
291 _dl_preload = _dl_getenv("LD_PRELOAD", envp);
292 _dl_bindnow = _dl_getenv("LD_BIND_NOW", envp) != NULL;
293 }
294
295 /* these are usable even in setugid processes */
296 _dl_traceld = _dl_getenv("LD_TRACE_LOADED_OBJECTS", envp) != NULL;
297 _dl_tracefmt1 = _dl_getenv("LD_TRACE_LOADED_OBJECTS_FMT1", envp);
298 _dl_tracefmt2 = _dl_getenv("LD_TRACE_LOADED_OBJECTS_FMT2", envp);
299 _dl_traceprog = _dl_getenv("LD_TRACE_LOADED_OBJECTS_PROGNAME", envp);
300
301 environ = envp;
302
303 _dl_trace_setup(envp);
304
305 if (argv0 != NULL) { /* NULL ptr if argc = 0 */
306 const char *p = _dl_strrchr(argv0, '/');
307
308 if (p == NULL)
309 p = argv0;
310 else
311 p++;
312 _dl_strlcpy(progname_storage, p, sizeof(progname_storage));
313 }
314 __progname = progname_storage;
315 }
316
317 int
318 _dl_load_dep_libs(elf_object_t *object, int flags, int booting)
319 {
320 elf_object_t *dynobj;
321 Elf_Dyn *dynp;
322 unsigned int loop;
323 int libcount;
324 int depflags, nodelete = 0;
325
326 dynobj = object;
327 while (dynobj) {
328 DL_DEB(("examining: '%s'\n", dynobj->load_name));
329 libcount = 0;
330
331 /* propagate DF_1_NOW to deplibs (can be set by dynamic tags) */
332 depflags = flags | (dynobj->obj_flags & DF_1_NOW);
333 if (booting || object->nodelete)
334 nodelete = 1;
335
336 for (dynp = dynobj->load_dyn; dynp->d_tag; dynp++) {
337 if (dynp->d_tag == DT_NEEDED) {
338 libcount++;
339 }
340 }
341
342 if (libcount != 0) {
343 struct listent {
344 Elf_Dyn *dynp;
345 elf_object_t *depobj;
346 } *liblist;
347 int *randomlist;
348
349 liblist = _dl_reallocarray(NULL, libcount,
350 sizeof(struct listent));
351 randomlist = _dl_reallocarray(NULL, libcount,
352 sizeof(int));
353
354 if (liblist == NULL || randomlist == NULL)
355 _dl_oom();
356
357 for (dynp = dynobj->load_dyn, loop = 0; dynp->d_tag;
358 dynp++)
359 if (dynp->d_tag == DT_NEEDED)
360 liblist[loop++].dynp = dynp;
361
362 /*
363 * We can't support multiple versions of libc
364 * in a single process. So remember the first
365 * libc SONAME we encounter as a dependency
366 * and use it in further loads of libc. In
367 * practice this means we will always use the
368 * libc version that the binary was linked
369 * against. This isn't entirely correct, but
370 * it will keep most binaries running when
371 * transitioning over a libc major bump.
372 */
373 if (_dl_libcname == NULL) {
374 for (loop = 0; loop < libcount; loop++) {
375 const char *libname;
376 libname = dynobj->dyn.strtab;
377 libname +=
378 liblist[loop].dynp->d_un.d_val;
379 if (_dl_strncmp(libname,
380 "libc.so.", 8) == 0) {
381 _dl_libcname = libname;
382 break;
383 }
384 }
385 }
386
387 /* Randomize these */
388 for (loop = 0; loop < libcount; loop++)
389 randomlist[loop] = loop;
390
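/*
 * Effectively a Fisher-Yates shuffle of the indices, so the DT_NEEDED
 * libraries are loaded (and mapped) in random order.
 */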
391 for (loop = 1; loop < libcount; loop++) {
392 unsigned int rnd;
393 int cur;
394 rnd = _dl_arc4random();
395 rnd = rnd % (loop+1);
396 cur = randomlist[rnd];
397 randomlist[rnd] = randomlist[loop];
398 randomlist[loop] = cur;
399 }
400
401 for (loop = 0; loop < libcount; loop++) {
402 elf_object_t *depobj;
403 const char *libname;
404 libname = dynobj->dyn.strtab;
405 libname +=
406 liblist[randomlist[loop]].dynp->d_un.d_val;
407 DL_DEB(("loading: %s required by %s\n", libname,
408 dynobj->load_name));
409 if (_dl_strncmp(libname, "libc.so.", 8) == 0) {
410 if (_dl_libcname)
411 libname = _dl_libcname;
412 }
413 depobj = _dl_load_shlib(libname, dynobj,
414 OBJTYPE_LIB, depflags, nodelete);
415 if (depobj == 0) {
416 if (booting) {
417 _dl_die(
418 "can't load library '%s'",
419 libname);
420 }
421 DL_DEB(("dlopen: failed to open %s\n",
422 libname));
423 _dl_free(liblist);
424 _dl_free(randomlist);
425 return (1);
426 }
427 liblist[randomlist[loop]].depobj = depobj;
428 }
429
430 object_vec_grow(&dynobj->child_vec, libcount);
431 for (loop = 0; loop < libcount; loop++) {
432 _dl_add_object(liblist[loop].depobj);
433 _dl_link_child(liblist[loop].depobj, dynobj);
434 }
435 _dl_free(liblist);
436 _dl_free(randomlist);
437 }
438 dynobj = dynobj->next;
439 }
440
441 _dl_cache_grpsym_list_setup(object);
442 return(0);
443 }
444
445
446 /* do any RWX -> RX fixups for executable PLTs and apply GNU_RELRO */
447 static inline void
448 _dl_self_relro(long loff)
449 {
450 Elf_Ehdr *ehdp;
451 Elf_Phdr *phdp;
452 int i;
453
454 ehdp = (Elf_Ehdr *)loff;
455 phdp = (Elf_Phdr *)(loff + ehdp->e_phoff);
456 for (i = 0; i < ehdp->e_phnum; i++, phdp++) {
457 switch (phdp->p_type) {
458 #if defined(__alpha__) || defined(__hppa__) || defined(__powerpc__) || \
459 defined(__sparc64__)
460 case PT_LOAD:
461 if ((phdp->p_flags & (PF_X | PF_W)) != (PF_X | PF_W))
462 break;
463 _dl_mprotect((void *)(phdp->p_vaddr + loff),
464 phdp->p_memsz, PROT_READ);
465 break;
466 #endif
467 case PT_GNU_RELRO:
468 _dl_mprotect((void *)(phdp->p_vaddr + loff),
469 phdp->p_memsz, PROT_READ);
470 _dl_mimmutable((void *)(phdp->p_vaddr + loff),
471 phdp->p_memsz);
472 break;
473 }
474 }
475 }
476
477
478 #define PFLAGS(X) ((((X) & PF_R) ? PROT_READ : 0) | \
479 (((X) & PF_W) ? PROT_WRITE : 0) | \
480 (((X) & PF_X) ? PROT_EXEC : 0))
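/*
 * For example, PFLAGS(PF_R|PF_X) yields PROT_READ|PROT_EXEC: the ELF
 * segment permission bits mapped onto mmap/mprotect protections.
 */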
481
482 /*
483 * To keep kbind(2) from becoming a powerful gadget, it is called inline
484 * within a function, so we cannot create a precise pinsyscall label.
485 * Instead, create a duplicate entry to force the kernel's pinsyscall code
486 * to skip validation rather than flag the call as illegal. kbind(2)
487 * remains safe because it self-protects by checking its calling address.
488 */
489 #define __STRINGIFY(x) #x
490 #define STRINGIFY(x) __STRINGIFY(x)
491 #ifdef __arm__
492 __asm__(".pushsection .openbsd.syscalls,\"\",%progbits;"
493 ".p2align 2;"
494 ".long 0;"
495 ".long " STRINGIFY(SYS_kbind) ";"
496 ".popsection");
497 #else
498 __asm__(".pushsection .openbsd.syscalls,\"\",@progbits;"
499 ".p2align 2;"
500 ".long 0;"
501 ".long " STRINGIFY(SYS_kbind) ";"
502 ".popsection");
503 #endif
504
505 /*
506 * This is the dynamic loader entry point. On entry, the stack and
507 * registers are set up according to the architecture's ABI
508 * specification. The first task is to dig out all the information
509 * we need to accomplish our job.
510 */
511 unsigned long
512 _dl_boot(const char **argv, char **envp, const long dyn_loff, long *dl_data)
513 {
514 struct elf_object *exe_obj; /* Pointer to executable object */
515 struct elf_object *dyn_obj; /* Pointer to ld.so object */
516 struct r_debug **map_link; /* Where to put pointer for gdb */
517 struct r_debug *debug_map;
518 struct load_list *next_load, *load_list = NULL;
519 Elf_Dyn *dynp;
520 Elf_Phdr *phdp;
521 Elf_Ehdr *ehdr;
522 char *us = NULL;
523 unsigned int loop;
524 int failed;
525 struct dep_node *n;
526 Elf_Addr minva, maxva, exe_loff, exec_end, cur_exec_end;
527 Elf_Addr relro_addr = 0, relro_size = 0;
528 Elf_Phdr *ptls = NULL;
529 int align;
530
531 if (dl_data[AUX_pagesz] != 0)
532 _dl_pagesz = dl_data[AUX_pagesz];
533 _dl_malloc_init();
534
535 _dl_argv = argv;
536 while (_dl_argv[_dl_argc] != NULL)
537 _dl_argc++;
538 _dl_setup_env(argv[0], envp);
539
540 /*
541 * Make the GOT, the PLT and the variables initialized during the
542 * ld.so setup above read-only.
543 */
544 _dl_self_relro(dyn_loff);
545
546 align = _dl_pagesz - 1;
547
548 #define ROUND_PG(x) (((x) + align) & ~(align))
549 #define TRUNC_PG(x) ((x) & ~(align))
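/*
 * For illustration, with the usual 4096-byte page size:
 * ROUND_PG(0x1234) == 0x2000 and TRUNC_PG(0x1234) == 0x1000.
 */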
550
551 if (_dl_bindnow) {
552 /* Lazy binding disabled, so disable kbind */
553 _dl_kbind(NULL, 0, 0);
554 }
555
556 DL_DEB(("ld.so loading: '%s'\n", __progname));
557
558 /* init this at runtime, not statically */
559 TAILQ_INIT(&_dlopened_child_list);
560
561 exe_obj = NULL;
562 _dl_loading_object = NULL;
563
564 minva = ELF_NO_ADDR;
565 maxva = exe_loff = exec_end = 0;
566
567 /*
568 * Examine the user application and set up object information.
569 */
570 phdp = (Elf_Phdr *)dl_data[AUX_phdr];
571 for (loop = 0; loop < dl_data[AUX_phnum]; loop++) {
572 switch (phdp->p_type) {
573 case PT_PHDR:
574 exe_loff = (Elf_Addr)dl_data[AUX_phdr] - phdp->p_vaddr;
575 us += exe_loff;
576 DL_DEB(("exe load offset: 0x%lx\n", exe_loff));
577 break;
578 case PT_DYNAMIC:
579 minva = TRUNC_PG(minva);
580 maxva = ROUND_PG(maxva);
581 exe_obj = _dl_finalize_object(argv[0] ? argv[0] : "",
582 (Elf_Dyn *)(phdp->p_vaddr + exe_loff),
583 (Elf_Phdr *)dl_data[AUX_phdr],
584 dl_data[AUX_phnum], OBJTYPE_EXE, minva + exe_loff,
585 exe_loff);
586 _dl_add_object(exe_obj);
587 break;
588 case PT_INTERP:
589 us += phdp->p_vaddr;
590 break;
591 case PT_LOAD:
592 if (phdp->p_vaddr < minva)
593 minva = phdp->p_vaddr;
594 if (phdp->p_vaddr > maxva)
595 maxva = phdp->p_vaddr + phdp->p_memsz;
596
597 next_load = _dl_calloc(1, sizeof(struct load_list));
598 if (next_load == NULL)
599 _dl_oom();
600 next_load->next = load_list;
601 load_list = next_load;
602 next_load->start = (char *)TRUNC_PG(phdp->p_vaddr) + exe_loff;
603 next_load->size = (phdp->p_vaddr & align) + phdp->p_filesz;
604 next_load->prot = PFLAGS(phdp->p_flags);
605 cur_exec_end = (Elf_Addr)next_load->start + next_load->size;
606 if ((next_load->prot & PROT_EXEC) != 0 &&
607 cur_exec_end > exec_end)
608 exec_end = cur_exec_end;
609 break;
610 case PT_TLS:
611 if (phdp->p_filesz > phdp->p_memsz)
612 _dl_die("invalid tls data");
613 ptls = phdp;
614 break;
615 case PT_GNU_RELRO:
616 relro_addr = phdp->p_vaddr + exe_loff;
617 relro_size = phdp->p_memsz;
618 break;
619 }
620 phdp++;
621 }
622 exe_obj->load_list = load_list;
623 exe_obj->obj_flags |= DF_1_GLOBAL;
624 exe_obj->nodelete = 1;
625 exe_obj->load_size = maxva - minva;
626 exe_obj->relro_addr = relro_addr;
627 exe_obj->relro_size = relro_size;
628 _dl_set_sod(exe_obj->load_name, &exe_obj->sod);
629
630 #ifdef __i386__
631 if (exec_end > I386_MAX_EXE_ADDR)
632 _dl_exec_hint = (void *)ROUND_PG(exec_end-I386_MAX_EXE_ADDR);
633 DL_DEB(("_dl_exec_hint: 0x%lx\n", _dl_exec_hint));
634 #endif
635
636 /* TLS bits in the base executable */
637 if (ptls != NULL && ptls->p_memsz)
638 _dl_set_tls(exe_obj, ptls, exe_loff, NULL);
639
640 n = _dl_malloc(sizeof *n);
641 if (n == NULL)
642 _dl_oom();
643 n->data = exe_obj;
644 TAILQ_INSERT_TAIL(&_dlopened_child_list, n, next_sib);
645 exe_obj->opencount++;
646
647 if (_dl_preload != NULL)
648 _dl_dopreload(_dl_preload);
649
650 _dl_load_dep_libs(exe_obj, exe_obj->obj_flags, 1);
651
652 /*
653 * Now add the dynamic loader itself last in the object list
654 * so we can use the _dl_ code when serving dl.... calls.
655 * Intentionally left off the exe child_vec.
656 */
657 dynp = (Elf_Dyn *)((void *)_DYNAMIC);
658 ehdr = (Elf_Ehdr *)dl_data[AUX_base];
659 dyn_obj = _dl_finalize_object(us, dynp,
660 (Elf_Phdr *)((char *)dl_data[AUX_base] + ehdr->e_phoff),
661 ehdr->e_phnum, OBJTYPE_LDR, dl_data[AUX_base], dyn_loff);
662 _dl_add_object(dyn_obj);
663
664 dyn_obj->refcount++;
665 _dl_link_grpsym(dyn_obj);
666
667 dyn_obj->status |= STAT_RELOC_DONE;
668 _dl_set_sod(dyn_obj->load_name, &dyn_obj->sod);
669
670 /* calculate the offsets for static TLS allocations */
671 _dl_allocate_tls_offsets();
672
673 /*
674 * Make something to help gdb when poking around in the code.
675 * Do this poking at the .dynamic section now, before relocation
676 * renders it read-only.
677 */
678 map_link = NULL;
679 #ifdef __mips__
680 for (dynp = exe_obj->load_dyn; dynp->d_tag; dynp++) {
681 if (dynp->d_tag == DT_MIPS_RLD_MAP_REL) {
682 map_link = (struct r_debug **)
683 (dynp->d_un.d_ptr + (Elf_Addr)dynp);
684 break;
685 } else if (dynp->d_tag == DT_MIPS_RLD_MAP) {
686 map_link = (struct r_debug **)
687 (dynp->d_un.d_ptr + exe_loff);
688 break;
689 }
690 }
691 #endif
692 if (map_link == NULL) {
693 for (dynp = exe_obj->load_dyn; dynp->d_tag; dynp++) {
694 if (dynp->d_tag == DT_DEBUG) {
695 map_link = (struct r_debug **)&dynp->d_un.d_ptr;
696 break;
697 }
698 }
699 if (dynp->d_tag != DT_DEBUG)
700 DL_DEB(("failed to mark DTDEBUG\n"));
701 }
702 if (map_link) {
703 debug_map = _dl_malloc(sizeof(*debug_map));
704 if (debug_map == NULL)
705 _dl_oom();
706 debug_map->r_version = 1;
707 debug_map->r_map = (struct link_map *)_dl_objects;
708 debug_map->r_brk = (Elf_Addr)_dl_debug_state;
709 debug_map->r_state = RT_CONSISTENT;
710 debug_map->r_ldbase = dyn_loff;
711 _dl_debug_map = debug_map;
712 #ifdef __mips__
713 relro_addr = exe_obj->relro_addr;
714 if (dynp->d_tag == DT_DEBUG &&
715 ((Elf_Addr)map_link + sizeof(*map_link) <= relro_addr ||
716 (Elf_Addr)map_link >= relro_addr + exe_obj->relro_size)) {
717 _dl_mprotect(map_link, sizeof(*map_link),
718 PROT_READ|PROT_WRITE);
719 *map_link = _dl_debug_map;
720 _dl_mprotect(map_link, sizeof(*map_link),
721 PROT_READ|PROT_EXEC);
722 } else
723 #endif
724 *map_link = _dl_debug_map;
725 }
726
727
728 /*
729 * Everything should be in place now for doing the relocation
730 * and binding. Call _dl_rtld to do the job. Fingers crossed.
731 */
732
733 failed = 0;
734 if (!_dl_traceld)
735 failed = _dl_rtld(_dl_objects);
736
737 if (_dl_debug || _dl_traceld) {
738 if (_dl_traceld)
739 _dl_pledge("stdio rpath", NULL);
740 _dl_show_objects(NULL);
741 }
742
743 DL_DEB(("dynamic loading done, %s.\n",
744 (failed == 0) ? "success":"failed"));
745
746 if (failed != 0)
747 _dl_die("relocation failed");
748
749 if (_dl_traceld)
750 _dl_exit(0);
751
752 _dl_loading_object = NULL;
753
754 /* set up the TIB for the initial thread */
755 _dl_allocate_first_tib();
756
757 _dl_fixup_user_env();
758
759 _dl_debug_state();
760
761 /*
762 * Do not run init code if run from ldd.
763 */
764 if (_dl_objects->next != NULL) {
765 _dl_call_preinit(_dl_objects);
766 _dl_call_init(_dl_objects);
767 }
768
769 DL_DEB(("entry point: 0x%lx\n", dl_data[AUX_entry]));
770
771 /*
772 * Return the entry point.
773 */
774 return(dl_data[AUX_entry]);
775 }
776
777 int
778 _dl_rtld(elf_object_t *object)
779 {
780 struct load_list *llist;
781 int fails = 0;
782
783 if (object->next)
784 fails += _dl_rtld(object->next);
785
786 if (object->status & STAT_RELOC_DONE)
787 return 0;
788
789 /*
790 * Do relocation information first, then GOT.
791 */
792 unprotect_if_textrel(object);
793 _dl_rreloc(object);
794 fails = _dl_md_reloc(object, DT_REL, DT_RELSZ);
795 fails += _dl_md_reloc(object, DT_RELA, DT_RELASZ);
796 reprotect_if_textrel(object);
797
798 /*
799 * We do lazy resolution by default, doing eager resolution if
800 * - the object requests it with -znow, OR
801 * - LD_BIND_NOW is set and this object isn't being ltraced
802 *
803 * Note that -znow disables ltrace for the object: on at least
804 * amd64 'ld' doesn't generate the trampoline for lazy relocation
805 * when -znow is used.
806 */
807 fails += _dl_md_reloc_got(object, !(object->obj_flags & DF_1_NOW) &&
808 !(_dl_bindnow && !object->traced));
809
810 /*
811 * Look for W&X segments and remove their write permission.
812 */
813 for (llist = object->load_list; llist != NULL; llist = llist->next) {
814 if ((llist->prot & PROT_WRITE) && (llist->prot & PROT_EXEC)) {
815 _dl_mprotect(llist->start, llist->size,
816 llist->prot & ~PROT_WRITE);
817 }
818 }
819
820 /*
821 * TEXTREL binaries are loaded without immutability applied to their
822 * non-writable sections. Once the text relocations are finished, those
823 * regions can be made immutable. The OPENBSD_MUTABLE section always
824 * lies within writable LOADs, so it is unaffected.
825 */
826 if (object->dyn.textrel) {
827 for (llist = object->load_list; llist != NULL; llist = llist->next)
828 if ((llist->prot & PROT_WRITE) == 0)
829 _dl_mimmutable(llist->start, llist->size);
830 }
831
832 if (fails == 0)
833 object->status |= STAT_RELOC_DONE;
834
835 return (fails);
836 }
837
838 void
839 _dl_call_preinit(elf_object_t *object)
840 {
841 if (object->dyn.preinit_array) {
842 int num = object->dyn.preinit_arraysz / sizeof(Elf_Addr);
843 int i;
844
845 DL_DEB(("doing preinitarray obj %p @%p: [%s]\n",
846 object, object->dyn.preinit_array, object->load_name));
847 for (i = 0; i < num; i++)
848 (*object->dyn.preinit_array[i])(_dl_argc, _dl_argv,
849 environ, &_dl_cb_cb);
850 }
851 }
852
853 void
854 _dl_call_init(elf_object_t *object)
855 {
856 _dl_call_init_recurse(object, 1);
857 _dl_call_init_recurse(object, 0);
858 }
859
860 static void
861 _dl_relro(elf_object_t *object)
862 {
863 /*
864 * Handle GNU_RELRO
865 */
866 if (object->relro_addr != 0 && object->relro_size != 0) {
867 Elf_Addr addr = object->relro_addr;
868
869 DL_DEB(("protect RELRO [0x%lx,0x%lx) in %s\n",
870 addr, addr + object->relro_size, object->load_name));
871 _dl_mprotect((void *)addr, object->relro_size, PROT_READ);
872
873 /* if library will never be unloaded, RELRO can be immutable */
874 if (object->nodelete)
875 _dl_mimmutable((void *)addr, object->relro_size);
876 }
877 }
878
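/*
 * Depth-first walk over child_vec, running ctors bottom-up (an
 * object's dependencies before the object itself). The walk is done
 * twice: the first pass (initfirst == 1) only runs objects marked
 * DF_1_INITFIRST, the second pass runs everything else.
 */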
879 void
880 _dl_call_init_recurse(elf_object_t *object, int initfirst)
881 {
882 struct object_vector vec;
883 int visited_flag = initfirst ? STAT_VISIT_INITFIRST : STAT_VISIT_INIT;
884 int i;
885
886 object->status |= visited_flag;
887
888 for (vec = object->child_vec, i = 0; i < vec.len; i++) {
889 if (vec.vec[i]->status & visited_flag)
890 continue;
891 _dl_call_init_recurse(vec.vec[i], initfirst);
892 }
893
894 if (object->status & STAT_INIT_DONE)
895 return;
896
897 if (initfirst && (object->obj_flags & DF_1_INITFIRST) == 0)
898 return;
899
900 if (!initfirst) {
901 _dl_relro(object);
902 _dl_apply_immutable(object);
903 }
904
905 if (object->dyn.init) {
906 DL_DEB(("doing ctors obj %p @%p: [%s]\n",
907 object, object->dyn.init, object->load_name));
908 (*object->dyn.init)();
909 }
910
911 if (object->dyn.init_array) {
912 int num = object->dyn.init_arraysz / sizeof(Elf_Addr);
913 int i;
914
915 DL_DEB(("doing initarray obj %p @%p: [%s]\n",
916 object, object->dyn.init_array, object->load_name));
917 for (i = 0; i < num; i++)
918 (*object->dyn.init_array[i])(_dl_argc, _dl_argv,
919 environ, &_dl_cb_cb);
920 }
921
922 if (initfirst) {
923 _dl_relro(object);
924 _dl_apply_immutable(object);
925 }
926
927 object->status |= STAT_INIT_DONE;
928 }
929
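/*
 * Minimal stand-in for getenv(3): scan env for "var=..." and return a
 * pointer to the text after the '=', or NULL if not present.
 */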
930 char *
931 _dl_getenv(const char *var, char **env)
932 {
933 const char *ep;
934
935 while ((ep = *env++)) {
936 const char *vp = var;
937
938 while (*vp && *vp == *ep) {
939 vp++;
940 ep++;
941 }
942 if (*vp == '\0' && *ep++ == '=')
943 return((char *)ep);
944 }
945 return(NULL);
946 }
947
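/*
 * Remove every "var=..." entry from env in place by shifting the rest
 * of the array (including the terminating NULL) down one slot.
 */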
948 void
949 _dl_unsetenv(const char *var, char **env)
950 {
951 char *ep;
952
953 while ((ep = *env)) {
954 const char *vp = var;
955
956 while (*vp && *vp == *ep) {
957 vp++;
958 ep++;
959 }
960 if (*vp == '\0' && *ep++ == '=') {
961 char **P;
962
963 for (P = env;; ++P)
964 if (!(*P = *(P + 1)))
965 break;
966 } else
967 env++;
968 }
969 }
970
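/*
 * If `name' resolves to a copy of the variable outside ld.so, copy
 * ld.so's current value into it so both views stay in sync.
 */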
971 static inline void
972 fixup_sym(struct elf_object *dummy_obj, const char *name, void *addr)
973 {
974 struct sym_res sr;
975
976 sr = _dl_find_symbol(name, SYM_SEARCH_ALL|SYM_NOWARNNOTFOUND|SYM_PLT,
977 NULL, dummy_obj);
978 if (sr.sym != NULL) {
979 void *p = (void *)(sr.sym->st_value + sr.obj->obj_base);
980 if (p != addr) {
981 DL_DEB(("setting %s %p@%s[%p] from %p\n", name,
982 p, sr.obj->load_name, (void *)sr.obj, addr));
983 *(void **)p = *(void **)addr;
984 }
985 }
986 }
987
988 /*
989 * _dl_fixup_user_env()
990 *
991 * Set the user environment so that programs can use the environment
992 * while running constructors. Specifically, MALLOC_OPTIONS= for malloc()
993 */
994 void
995 _dl_fixup_user_env(void)
996 {
997 struct elf_object dummy_obj;
998
999 dummy_obj.dyn.symbolic = 0;
1000 dummy_obj.load_name = "ld.so";
1001 fixup_sym(&dummy_obj, "environ", &environ);
1002 fixup_sym(&dummy_obj, "__progname", &__progname);
1003 }
1004
1005 const void *
1006 _dl_cb_cb(int version)
1007 {
1008 DL_DEB(("version %d callbacks requested\n", version));
1009 if (version == 0)
1010 return &callbacks_0;
1011 return NULL;
1012 }
1013
1014 static inline void
1015 unprotect_if_textrel(elf_object_t *object)
1016 {
1017 struct load_list *ll;
1018
1019 if (__predict_false(object->dyn.textrel == 1)) {
1020 for (ll = object->load_list; ll != NULL; ll = ll->next) {
1021 if ((ll->prot & PROT_WRITE) == 0)
1022 _dl_mprotect(ll->start, ll->size,
1023 PROT_READ | PROT_WRITE);
1024 }
1025 }
1026 }
1027
1028 static inline void
1029 reprotect_if_textrel(elf_object_t *object)
1030 {
1031 struct load_list *ll;
1032
1033 if (__predict_false(object->dyn.textrel == 1)) {
1034 for (ll = object->load_list; ll != NULL; ll = ll->next) {
1035 if ((ll->prot & PROT_WRITE) == 0)
1036 _dl_mprotect(ll->start, ll->size, ll->prot);
1037 }
1038 }
1039 }
1040
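/*
 * DT_RELR decoder; a sketch of the packed format as handled below: an
 * even entry gives the address of one word to relocate, and each odd
 * entry that follows (low bit set) is a bitmap whose remaining 63 bits
 * (31 on 32-bit targets) mark which of the next words also get the
 * load offset added, after which the window advances by that many
 * words.
 */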
1041 static void
1042 _dl_rreloc(elf_object_t *object)
1043 {
1044 const Elf_Relr *reloc, *rend;
1045 Elf_Addr loff = object->obj_base;
1046
1047 reloc = object->dyn.relr;
1048 rend = (const Elf_Relr *)((char *)reloc + object->dyn.relrsz);
1049
1050 while (reloc < rend) {
1051 Elf_Addr *where;
1052
1053 where = (Elf_Addr *)(*reloc + loff);
1054 *where++ += loff;
1055
1056 for (reloc++; reloc < rend && (*reloc & 1); reloc++) {
1057 Elf_Addr bits = *reloc >> 1;
1058
1059 Elf_Addr *here = where;
1060 while (bits != 0) {
1061 if (bits & 1) {
1062 *here += loff;
1063 }
1064 bits >>= 1;
1065 here++;
1066 }
1067 where += (8 * sizeof *reloc) - 1;
1068 }
1069 }
1070 }
1071
1072 void
1073 _dl_push_range(struct range_vector *v, vaddr_t s, vaddr_t e)
1074 {
1075 int i = v->count;
1076
1077 if (i == nitems(v->slice)) {
1078 _dl_die("too many ranges");
1079 }
1080 /* Skips the empty ranges (s == e). */
1081 if (s < e) {
1082 v->slice[i].start = s;
1083 v->slice[i].end = e;
1084 v->count++;
1085 } else if (s > e) {
1086 _dl_die("invalid range");
1087 }
1088 }
1089
1090 void
1091 _dl_push_range_size(struct range_vector *v, vaddr_t s, vsize_t size)
1092 {
1093 _dl_push_range(v, s, s + size);
1094 }
1095
1096 /*
1097 * Finds the truly immutable ranges by taking mutable ones out. Implements
1098 * interval difference of imut and mut. Interval splitting necessitates
1099 * intermediate storage and complex double buffering.
1100 */
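/*
 * For example, an immutable range [0x1000,0x5000) with a mutable hole
 * at [0x2000,0x3000) ends up installed as the two immutable slices
 * [0x1000,0x2000) and [0x3000,0x5000).
 */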
1101 void
1102 _dl_apply_immutable(elf_object_t *object)
1103 {
1104 struct range_vector acc[2]; /* flips out to avoid copying */
1105 struct addr_range *m, *im;
1106 int i, j, imut, in, out;
1107
1108 if (object->obj_type != OBJTYPE_LIB)
1109 return;
1110
1111 for (imut = 0; imut < object->imut.count; imut++) {
1112 im = &object->imut.slice[imut];
1113 out = 0;
1114 acc[out].count = 0;
1115 _dl_push_range(&acc[out], im->start, im->end);
1116
1117 for (i = 0; i < object->mut.count; i++) {
1118 m = &object->mut.slice[i];
1119 in = out;
1120 out = 1 - in;
1121 acc[out].count = 0;
1122 for (j = 0; j < acc[in].count; j++) {
1123 const vaddr_t ms = m->start, me = m->end;
1124 const vaddr_t is = acc[in].slice[j].start,
1125 ie = acc[in].slice[j].end;
1126 if (ie <= ms || me <= is) {
1127 /* is .. ie .. ms .. me -> is .. ie */
1128 /* ms .. me .. is .. ie -> is .. ie */
1129 _dl_push_range(&acc[out], is, ie);
1130 } else if (ms <= is && ie <= me) {
1131 /* PROVIDED: ms < ie && is < me */
1132 /* ms .. is .. ie .. me -> [] */
1133 ;
1134 } else if (ie <= me) {
1135 /* is .. ms .. ie .. me -> is .. ms */
1136 _dl_push_range(&acc[out], is, ms);
1137 } else if (is < ms) {
1138 /* is .. ms .. me .. ie -> is .. ms */
1139 _dl_push_range(&acc[out], is, ms);
1140 _dl_push_range(&acc[out], me, ie);
1141 } else {
1142 /* ms .. is .. me .. ie -> me .. ie */
1143 _dl_push_range(&acc[out], me, ie);
1144 }
1145 }
1146 }
1147
1148 /* and now, install immutability for objects */
1149 for (i = 0; i < acc[out].count; i++) {
1150 const struct addr_range *ar = &acc[out].slice[i];
1151 _dl_mimmutable((void *)ar->start, ar->end - ar->start);
1152 }
1153
1154 }
1155 }
1156