xref: /openbsd/libexec/ld.so/library_mquery.c (revision fc61954a)
1 /*	$OpenBSD: library_mquery.c,v 1.56 2016/08/12 20:39:01 deraadt Exp $ */
2 
3 /*
4  * Copyright (c) 2002 Dale Rahn
5  * Copyright (c) 1998 Per Fogelstrom, Opsycon AB
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
17  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  */
29 
30 #define _DYN_LOADER
31 
32 #include <sys/types.h>
33 #include <fcntl.h>
34 #include <sys/mman.h>
35 
36 #include "syscall.h"
37 #include "archdep.h"
38 #include "resolve.h"
39 #include "sod.h"
40 
41 #define PFLAGS(X) ((((X) & PF_R) ? PROT_READ : 0) | \
42 		   (((X) & PF_W) ? PROT_WRITE : 0) | \
43 		   (((X) & PF_X) ? PROT_EXEC : 0))
44 
45 void
46 _dl_load_list_free(struct load_list *load_list)
47 {
48 	struct load_list *next;
49 	Elf_Addr align = _dl_pagesz - 1;
50 
51 	while (load_list != NULL) {
52 		if (load_list->start != NULL)
53 			_dl_munmap(load_list->start,
54 			    ((load_list->size) + align) & ~align);
55 		next = load_list->next;
56 		_dl_free(load_list);
57 		load_list = next;
58 	}
59 }
60 
61 
/*
 * Unload a shared object and, recursively, everything it references.
 *
 * An object is only torn down once its reference count has dropped to
 * zero and it has not already been marked STAT_UNLOADED (the flag also
 * breaks cycles in the dependency graph during the recursion below).
 */
void
_dl_unload_shlib(elf_object_t *object)
{
	struct dep_node *n;
	elf_object_t *load_object = object->load_object;

	/*
	 * If our load object has become unreferenced then we lost the
	 * last group reference to it, so the entire group should be taken
	 * down.  The current object is somewhere below load_object in
	 * the child_list tree, so it'll get cleaned up by the recursion.
	 * That means we can just switch here to the load object.
	 */
	if (load_object != object && OBJECT_REF_CNT(load_object) == 0 &&
	    (load_object->status & STAT_UNLOADED) == 0) {
		DL_DEB(("unload_shlib switched from %s to %s\n",
		    object->load_name, load_object->load_name));
		object = load_object;
		/* jump past the refcount test: the group head must go */
		goto unload;
	}

	DL_DEB(("unload_shlib called on %s\n", object->load_name));
	if (OBJECT_REF_CNT(object) == 0 &&
	    (object->status & STAT_UNLOADED) == 0) {
unload:
		/* mark first so the recursion below cannot revisit us */
		object->status |= STAT_UNLOADED;
		TAILQ_FOREACH(n, &object->child_list, next_sib)
			_dl_unload_shlib(n->data);
		TAILQ_FOREACH(n, &object->grpref_list, next_sib)
			_dl_unload_shlib(n->data);
		DL_DEB(("unload_shlib unloading on %s\n", object->load_name));
		/* release the mappings, then the bookkeeping structures */
		_dl_load_list_free(object->load_list);
		_dl_remove_object(object);
	}
}
97 
98 
99 elf_object_t *
100 _dl_tryload_shlib(const char *libname, int type, int flags)
101 {
102 	int libfile, i;
103 	struct load_list *ld, *lowld = NULL;
104 	elf_object_t *object;
105 	Elf_Dyn *dynp = NULL;
106 	Elf_Ehdr *ehdr;
107 	Elf_Phdr *phdp;
108 	Elf_Addr load_end = 0;
109 	Elf_Addr align = _dl_pagesz - 1, off, size;
110 	Elf_Phdr *ptls = NULL;
111 	Elf_Addr relro_addr = 0, relro_size = 0;
112 	struct stat sb;
113 	char hbuf[4096];
114 
115 #define ROUND_PG(x) (((x) + align) & ~(align))
116 #define TRUNC_PG(x) ((x) & ~(align))
117 
118 	libfile = _dl_open(libname, O_RDONLY | O_CLOEXEC);
119 	if (libfile < 0) {
120 		_dl_errno = DL_CANT_OPEN;
121 		return(0);
122 	}
123 
124 	if ( _dl_fstat(libfile, &sb) < 0) {
125 		_dl_errno = DL_CANT_OPEN;
126 		return(0);
127 	}
128 
129 	for (object = _dl_objects; object != NULL; object = object->next) {
130 		if (object->dev == sb.st_dev &&
131 		    object->inode == sb.st_ino) {
132 			object->obj_flags |= flags & DF_1_GLOBAL;
133 			_dl_close(libfile);
134 			if (_dl_loading_object == NULL)
135 				_dl_loading_object = object;
136 			if (object->load_object != _dl_objects &&
137 			    object->load_object != _dl_loading_object) {
138 				_dl_link_grpref(object->load_object,
139 				    _dl_loading_object);
140 			}
141 			return(object);
142 		}
143 	}
144 
145 	_dl_read(libfile, hbuf, sizeof(hbuf));
146 	ehdr = (Elf_Ehdr *)hbuf;
147 	if (ehdr->e_ident[0] != ELFMAG0  || ehdr->e_ident[1] != ELFMAG1 ||
148 	    ehdr->e_ident[2] != ELFMAG2 || ehdr->e_ident[3] != ELFMAG3 ||
149 	    ehdr->e_type != ET_DYN || ehdr->e_machine != MACHID) {
150 		_dl_close(libfile);
151 		_dl_errno = DL_NOT_ELF;
152 		return(0);
153 	}
154 
155 	/* Insertion sort */
156 #define LDLIST_INSERT(ld) do { \
157 	struct load_list **_ld; \
158 	for (_ld = &lowld; *_ld != NULL; _ld = &(*_ld)->next) \
159 		if ((*_ld)->moff > ld->moff) \
160 			break; \
161 	ld->next = *_ld; \
162 	*_ld = ld; \
163 } while (0)
164 	/*
165 	 *  Alright, we might have a winner!
166 	 *  Figure out how much VM space we need and set up the load
167 	 *  list that we'll use to find free VM space.
168 	 */
169 	phdp = (Elf_Phdr *)(hbuf + ehdr->e_phoff);
170 	for (i = 0; i < ehdr->e_phnum; i++, phdp++) {
171 		switch (phdp->p_type) {
172 		case PT_LOAD:
173 			off = (phdp->p_vaddr & align);
174 			size = off + phdp->p_filesz;
175 
176 			if (size != 0) {
177 				ld = _dl_malloc(sizeof(struct load_list));
178 				if (ld == NULL)
179 					_dl_exit(7);
180 				ld->start = NULL;
181 				ld->size = size;
182 				ld->moff = TRUNC_PG(phdp->p_vaddr);
183 				ld->foff = TRUNC_PG(phdp->p_offset);
184 				ld->prot = PFLAGS(phdp->p_flags);
185 				LDLIST_INSERT(ld);
186 			}
187 
188 			if ((PFLAGS(phdp->p_flags) & PROT_WRITE) == 0 ||
189 			    ROUND_PG(size) == ROUND_PG(off + phdp->p_memsz))
190 				break;
191 			/* This phdr has a zfod section */
192 			ld = _dl_calloc(1, sizeof(struct load_list));
193 			if (ld == NULL)
194 				_dl_exit(7);
195 			ld->start = NULL;
196 			ld->size = ROUND_PG(off + phdp->p_memsz) -
197 			    ROUND_PG(size);
198 			ld->moff = TRUNC_PG(phdp->p_vaddr) +
199 			    ROUND_PG(size);
200 			ld->foff = -1;
201 			ld->prot = PFLAGS(phdp->p_flags);
202 			LDLIST_INSERT(ld);
203 			break;
204 		case PT_DYNAMIC:
205 			dynp = (Elf_Dyn *)phdp->p_vaddr;
206 			break;
207 		case PT_TLS:
208 			if (phdp->p_filesz > phdp->p_memsz) {
209 				_dl_printf("%s: invalid tls data in %s.\n",
210 				    __progname, libname);
211 				_dl_close(libfile);
212 				_dl_errno = DL_CANT_LOAD_OBJ;
213 				return(0);
214 			}
215 			if (!_dl_tib_static_done) {
216 				ptls = phdp;
217 				break;
218 			}
219 			_dl_printf("%s: unsupported TLS program header in %s\n",
220 			    __progname, libname);
221 			_dl_close(libfile);
222 			_dl_errno = DL_CANT_LOAD_OBJ;
223 			return(0);
224 		default:
225 			break;
226 		}
227 	}
228 
229 #define LOFF ((Elf_Addr)lowld->start - lowld->moff)
230 
231 retry:
232 	for (ld = lowld; ld != NULL; ld = ld->next) {
233 		off_t foff;
234 		int fd, flags;
235 		void *res;
236 
237 		flags = MAP_PRIVATE;
238 		if (LOFF + ld->moff != 0)
239 			flags |= MAP_FIXED | __MAP_NOREPLACE;
240 
241 		if (ld->foff < 0) {
242 			fd = -1;
243 			foff = 0;
244 			flags |= MAP_ANON;
245 		} else {
246 			fd = libfile;
247 			foff = ld->foff;
248 		}
249 
250 		res = _dl_mmap((void *)(LOFF + ld->moff), ROUND_PG(ld->size),
251 		    ld->prot, flags, fd, foff);
252 		if (_dl_mmap_error(res)) {
253 			/*
254 			 * The mapping we wanted isn't free, so we do an
255 			 * mquery without MAP_FIXED to get the next free
256 			 * mapping, adjust the base mapping address to match
257 			 * this free mapping and restart the process again.
258 			 *
259 			 * XXX - we need some kind of boundary condition
260 			 * here, or fix mquery to not run into the stack
261 			 */
262 			res = _dl_mquery((void *)(LOFF + ld->moff),
263 			    ROUND_PG(ld->size), ld->prot,
264 			    flags & ~(MAP_FIXED | __MAP_NOREPLACE), fd, foff);
265 
266 			/*
267 			 * If ld == lowld, then ld->start is just a hint and
268 			 * thus shouldn't be unmapped.
269 			 */
270 			ld->start = NULL;
271 
272 			/* Unmap any mappings that we did get in. */
273 			for (ld = lowld; ld != NULL; ld = ld->next) {
274 				if (ld->start == NULL)
275 					break;
276 				_dl_munmap(ld->start, ROUND_PG(ld->size));
277 				ld->start = NULL;
278 			}
279 
280 			/* if the mquery failed, give up */
281 			if (_dl_mmap_error(res))
282 				goto fail;
283 
284 			/* otherwise, reset the start of the base mapping */
285 			lowld->start = res - ld->moff + lowld->moff;
286 			goto retry;
287 		}
288 
289 		ld->start = res;
290 	}
291 
292 	for (ld = lowld; ld != NULL; ld = ld->next) {
293 		/* Zero out everything past the EOF */
294 		if ((ld->prot & PROT_WRITE) != 0 && (ld->size & align) != 0)
295 			_dl_memset((char *)ld->start + ld->size, 0,
296 			    _dl_pagesz - (ld->size & align));
297 		load_end = (Elf_Addr)ld->start + ROUND_PG(ld->size);
298 	}
299 
300 	phdp = (Elf_Phdr *)(hbuf + ehdr->e_phoff);
301 	for (i = 0; i < ehdr->e_phnum; i++, phdp++) {
302 		if (phdp->p_type == PT_OPENBSD_RANDOMIZE)
303 			_dl_arc4randombuf((char *)(phdp->p_vaddr + LOFF),
304 			    phdp->p_memsz);
305 		else if (phdp->p_type == PT_GNU_RELRO) {
306 			relro_addr = phdp->p_vaddr + LOFF;
307 			relro_size = phdp->p_memsz;
308 		}
309 	}
310 
311 	_dl_close(libfile);
312 
313 	dynp = (Elf_Dyn *)((unsigned long)dynp + LOFF);
314 	object = _dl_finalize_object(libname, dynp,
315 	    (Elf_Phdr *)((char *)lowld->start + ehdr->e_phoff), ehdr->e_phnum,
316 	    type, (Elf_Addr)lowld->start, LOFF);
317 	if (object) {
318 		object->load_size = (Elf_Addr)load_end - (Elf_Addr)lowld->start;
319 		object->load_list = lowld;
320 		/* set inode, dev from stat info */
321 		object->dev = sb.st_dev;
322 		object->inode = sb.st_ino;
323 		object->obj_flags |= flags;
324 		object->relro_addr = relro_addr;
325 		object->relro_size = relro_size;
326 		_dl_set_sod(object->load_name, &object->sod);
327 		if (ptls != NULL && ptls->p_memsz)
328 			_dl_set_tls(object, ptls, (Elf_Addr)lowld->start,
329 			    libname);
330 	} else {
331 		_dl_load_list_free(lowld);
332 	}
333 	return(object);
334 fail:
335 	_dl_printf("%s: rtld mmap failed mapping %s.\n", __progname, libname);
336 	_dl_close(libfile);
337 	_dl_errno = DL_CANT_MMAP;
338 	_dl_load_list_free(lowld);
339 	return(0);
340 }
341