xref: /freebsd/libexec/rtld-elf/arm/reloc.c (revision 535af610)
/*	$NetBSD: mdreloc.c,v 1.23 2003/07/26 15:04:38 mrg Exp $	*/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/stat.h>
#include <sys/mman.h>

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "machine/sysarch.h"

#include "debug.h"
#include "rtld.h"
#include "rtld_paths.h"

void
init_pltgot(Obj_Entry *obj)
{
	if (obj->pltgot != NULL) {
		obj->pltgot[1] = (Elf_Addr) obj;
		obj->pltgot[2] = (Elf_Addr) &_rtld_bind_start;
	}
}
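
/*
 * Illustrative sketch (kept under #if 0, not part of the build): per the
 * ELF ABI, GOT[0] holds the address of _DYNAMIC and GOT[1]/GOT[2] are
 * reserved for the run-time linker, which is what init_pltgot() above
 * fills in.  The function below and its use of the argument are
 * hypothetical; only the slot meanings reflect the code above.
 */
#if 0
static void
example_pltgot_layout(const Obj_Entry *obj)
{
	(void)obj->pltgot[0];	/* &_DYNAMIC, set by the static linker */
	(void)obj->pltgot[1];	/* (Elf_Addr)obj, set by init_pltgot() */
	(void)obj->pltgot[2];	/* &_rtld_bind_start, set by init_pltgot() */
}
#endif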

int
do_copy_relocations(Obj_Entry *dstobj)
{
	const Elf_Rel *rellim;
	const Elf_Rel *rel;

	assert(dstobj->mainprog);	/* COPY relocations are invalid elsewhere */

	rellim = (const Elf_Rel *)((const char *) dstobj->rel + dstobj->relsize);
	for (rel = dstobj->rel;  rel < rellim;  rel++) {
		if (ELF_R_TYPE(rel->r_info) == R_ARM_COPY) {
			void *dstaddr;
			const Elf_Sym *dstsym;
			const char *name;
			size_t size;
			const void *srcaddr;
			const Elf_Sym *srcsym;
			const Obj_Entry *srcobj, *defobj;
			SymLook req;
			int res;

			dstaddr = (void *)(dstobj->relocbase + rel->r_offset);
			dstsym = dstobj->symtab + ELF_R_SYM(rel->r_info);
			name = dstobj->strtab + dstsym->st_name;
			size = dstsym->st_size;

			symlook_init(&req, name);
			req.ventry = fetch_ventry(dstobj,
			    ELF_R_SYM(rel->r_info));
			req.flags = SYMLOOK_EARLY;

			for (srcobj = globallist_next(dstobj); srcobj != NULL;
			    srcobj = globallist_next(srcobj)) {
				res = symlook_obj(&req, srcobj);
				if (res == 0) {
					srcsym = req.sym_out;
					defobj = req.defobj_out;
					break;
				}
			}
			if (srcobj == NULL) {
				_rtld_error(
"Undefined symbol \"%s\" referenced from COPY relocation in %s",
				    name, dstobj->path);
				return (-1);
			}

			srcaddr = (const void *)(defobj->relocbase +
			    srcsym->st_value);
			memcpy(dstaddr, srcaddr, size);
		}
	}
	return 0;
}
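
/*
 * Illustrative sketch (kept under #if 0, not part of the build): a COPY
 * relocation typically comes from a layout like the one below.  A non-PIC
 * executable references a variable defined in a shared library, so the
 * static linker reserves space for it in the executable and emits
 * R_ARM_COPY; do_copy_relocations() then fills that space from the
 * library's copy at startup.  The file and variable names are
 * hypothetical.
 */
#if 0
/* libexample.so */
int example_value = 42;

/* main program, built as a non-PIC executable */
extern int example_value;

int
main(void)
{
	/* Reads the copy placed in the executable by R_ARM_COPY. */
	return (example_value);
}
#endif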

void _rtld_bind_start(void);
void _rtld_relocate_nonplt_self(Elf_Dyn *, Elf_Addr);

void
_rtld_relocate_nonplt_self(Elf_Dyn *dynp, Elf_Addr relocbase)
{
	const Elf_Rel *rel = NULL, *rellim;
	Elf_Addr relsz = 0;
	Elf_Addr *where;

	for (; dynp->d_tag != DT_NULL; dynp++) {
		switch (dynp->d_tag) {
		case DT_REL:
			rel = (const Elf_Rel *)(relocbase + dynp->d_un.d_ptr);
			break;
		case DT_RELSZ:
			relsz = dynp->d_un.d_val;
			break;
		}
	}
	rellim = (const Elf_Rel *)((const char *)rel + relsz);
	for (; rel < rellim; rel++) {
		where = (Elf_Addr *)(relocbase + rel->r_offset);

		*where += (Elf_Addr)relocbase;
	}
}
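
/*
 * Illustrative sketch (kept under #if 0, not part of the build):
 * _rtld_relocate_nonplt_self() runs before rtld's own globals are usable,
 * so it assumes every entry in rtld's DT_REL table only needs the load
 * base added, which is the R_ARM_RELATIVE case.  The numbers below are
 * hypothetical.
 */
#if 0
static void
example_relative_fixup(void)
{
	Elf_Addr relocbase = 0x20000000;	/* hypothetical load base */
	Elf_Addr word = 0x00001234;		/* link-time value at r_offset */

	word += relocbase;			/* now 0x20001234 */
	(void)word;
}
#endif
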
/*
 * It is possible for the compiler to emit relocations for unaligned data.
 * We handle this situation with these inlines.
 */
#define	RELOC_ALIGNED_P(x) \
	(((uintptr_t)(x) & (sizeof(void *) - 1)) == 0)

static __inline Elf_Addr
load_ptr(void *where)
{
	Elf_Addr res;

	memcpy(&res, where, sizeof(res));

	return (res);
}

static __inline void
store_ptr(void *where, Elf_Addr val)
{

	memcpy(where, &val, sizeof(val));
}
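
/*
 * Illustrative sketch (kept under #if 0, not part of the build): applying
 * an addend through the helpers above is safe even when the target is not
 * word-aligned, because memcpy() does not require aligned pointers.  The
 * function and values are hypothetical.
 */
#if 0
static void
example_unaligned_fixup(void)
{
	char buf[8];
	void *where = buf + 1;		/* deliberately misaligned */

	store_ptr(where, 0x1000);
	store_ptr(where, load_ptr(where) + 0x20000000);
	/* load_ptr(where) now yields 0x20001000. */
}
#endif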

static int
reloc_nonplt_object(Obj_Entry *obj, const Elf_Rel *rel, SymCache *cache,
    int flags, RtldLockState *lockstate)
{
	Elf_Addr        *where;
	const Elf_Sym   *def;
	const Obj_Entry *defobj;
	Elf_Addr         tmp;
	unsigned long	 symnum;

	where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
	symnum = ELF_R_SYM(rel->r_info);

	switch (ELF_R_TYPE(rel->r_info)) {
	case R_ARM_NONE:
		break;

#if 1 /* XXX should not occur */
	case R_ARM_PC24: {	/* word32 S - P + A */
		Elf32_Sword addend;

		/*
		 * Extract addend and sign-extend if needed.
		 */
		addend = *where;
		if (addend & 0x00800000)
			addend |= 0xff000000;

		def = find_symdef(symnum, obj, &defobj, flags, cache,
		    lockstate);
		if (def == NULL)
			return -1;
		tmp = (Elf_Addr)obj->relocbase + def->st_value
		    - (Elf_Addr)where + (addend << 2);
		if ((tmp & 0xfe000000) != 0xfe000000 &&
		    (tmp & 0xfe000000) != 0) {
			_rtld_error(
			"%s: R_ARM_PC24 relocation @ %p to %s failed "
			"(displacement %ld (%#lx) out of range)",
			    obj->path, where,
			    obj->strtab + obj->symtab[symnum].st_name,
			    (long) tmp, (long) tmp);
			return -1;
		}
		tmp >>= 2;
		*where = (*where & 0xff000000) | (tmp & 0x00ffffff);
		dbg("PC24 %s in %s --> %p @ %p in %s",
		    obj->strtab + obj->symtab[symnum].st_name,
		    obj->path, (void *)*where, where, defobj->path);
		break;
	}
#endif

	case R_ARM_ABS32:	/* word32 B + S + A */
	case R_ARM_GLOB_DAT:	/* word32 B + S */
		def = find_symdef(symnum, obj, &defobj, flags, cache,
		    lockstate);
		if (def == NULL)
			return -1;
		if (__predict_true(RELOC_ALIGNED_P(where))) {
			tmp = *where + (Elf_Addr)defobj->relocbase +
			    def->st_value;
			*where = tmp;
		} else {
			tmp = load_ptr(where) +
			    (Elf_Addr)defobj->relocbase +
			    def->st_value;
			store_ptr(where, tmp);
		}
		dbg("ABS32/GLOB_DAT %s in %s --> %p @ %p in %s",
		    obj->strtab + obj->symtab[symnum].st_name,
		    obj->path, (void *)tmp, where, defobj->path);
		break;

	case R_ARM_RELATIVE:	/* word32 B + A */
		if (__predict_true(RELOC_ALIGNED_P(where))) {
			tmp = *where + (Elf_Addr)obj->relocbase;
			*where = tmp;
		} else {
			tmp = load_ptr(where) +
			    (Elf_Addr)obj->relocbase;
			store_ptr(where, tmp);
		}
		dbg("RELATIVE in %s --> %p", obj->path,
		    (void *)tmp);
		break;

	case R_ARM_COPY:
		/*
		 * These are deferred until all other relocations have
		 * been done.  All we do here is make sure that the
		 * COPY relocation is not in a shared library.  They
		 * are allowed only in executable files.
		 */
		if (!obj->mainprog) {
			_rtld_error(
		"%s: Unexpected R_COPY relocation in shared library",
			    obj->path);
			return -1;
		}
		dbg("COPY (avoid in main)");
		break;

	case R_ARM_TLS_DTPOFF32:
		def = find_symdef(symnum, obj, &defobj, flags, cache,
		    lockstate);
		if (def == NULL)
			return -1;

		tmp = (Elf_Addr)(def->st_value);
		if (__predict_true(RELOC_ALIGNED_P(where)))
			*where = tmp;
		else
			store_ptr(where, tmp);

		dbg("TLS_DTPOFF32 %s in %s --> %p",
		    obj->strtab + obj->symtab[symnum].st_name,
		    obj->path, (void *)tmp);

		break;

	case R_ARM_TLS_DTPMOD32:
		def = find_symdef(symnum, obj, &defobj, flags, cache,
		    lockstate);
		if (def == NULL)
			return -1;

		tmp = (Elf_Addr)(defobj->tlsindex);
		if (__predict_true(RELOC_ALIGNED_P(where)))
			*where = tmp;
		else
			store_ptr(where, tmp);

		dbg("TLS_DTPMOD32 %s in %s --> %p",
		    obj->strtab + obj->symtab[symnum].st_name,
		    obj->path, (void *)tmp);

		break;

	case R_ARM_TLS_TPOFF32:
		def = find_symdef(symnum, obj, &defobj, flags, cache,
		    lockstate);
		if (def == NULL)
			return -1;

		if (!defobj->tls_static && !allocate_tls_offset(obj))
			return -1;

		tmp = (Elf_Addr)def->st_value + defobj->tlsoffset;
		if (__predict_true(RELOC_ALIGNED_P(where)))
			*where = tmp;
		else
			store_ptr(where, tmp);
		dbg("TLS_TPOFF32 %s in %s --> %p",
		    obj->strtab + obj->symtab[symnum].st_name,
		    obj->path, (void *)tmp);
		break;

	default:
		dbg("sym = %lu, type = %lu, offset = %p, "
		    "contents = %p, symbol = %s",
		    symnum, (u_long)ELF_R_TYPE(rel->r_info),
		    (void *)rel->r_offset, (void *)load_ptr(where),
		    obj->strtab + obj->symtab[symnum].st_name);
		_rtld_error("%s: Unsupported relocation type %ld "
		    "in non-PLT relocations\n",
		    obj->path, (u_long) ELF_R_TYPE(rel->r_info));
		return -1;
	}
	return 0;
}
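
/*
 * Illustrative sketch (kept under #if 0, not part of the build): the
 * formulas in the case labels above use B for the defining object's load
 * base, S for the symbol value and A for the addend.  ARM uses Elf_Rel,
 * so A is whatever value the linker left in the word being patched.  The
 * numbers below are hypothetical.
 */
#if 0
static void
example_abs32_formula(void)
{
	Elf_Addr B = 0x40000000;	/* defobj->relocbase */
	Elf_Addr S = 0x00002468;	/* def->st_value */
	Elf_Addr A = 0x00000010;	/* *where before relocation */

	/* R_ARM_ABS32 stores B + S + A, i.e. 0x40002478, back into *where. */
	(void)(B + S + A);
}
#endif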

/*
 * Process non-PLT relocations.
 */
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
    RtldLockState *lockstate)
{
	const Elf_Rel *rellim;
	const Elf_Rel *rel;
	SymCache *cache;
	int r = -1;

	/* The relocation for the dynamic loader has already been done. */
	if (obj == obj_rtld)
		return (0);
	if ((flags & SYMLOOK_IFUNC) != 0)
		/* XXX not implemented */
		return (0);

	/*
	 * The dynamic loader may be called from a thread; we have only a
	 * limited amount of stack available, so we cannot use alloca().
	 */
	cache = calloc(obj->dynsymcount, sizeof(SymCache));
	/* No need to check for NULL here */

	rellim = (const Elf_Rel *)((const char *)obj->rel + obj->relsize);
	for (rel = obj->rel; rel < rellim; rel++) {
		if (reloc_nonplt_object(obj, rel, cache, flags, lockstate) < 0)
			goto done;
	}
	r = 0;
done:
	if (cache != NULL)
		free(cache);
	return (r);
}

/*
 * Process the PLT relocations.
 */
int
reloc_plt(Obj_Entry *obj, int flags __unused, RtldLockState *lockstate __unused)
{
	const Elf_Rel *rellim;
	const Elf_Rel *rel;

	rellim = (const Elf_Rel *)((const char *)obj->pltrel +
	    obj->pltrelsize);
	for (rel = obj->pltrel;  rel < rellim;  rel++) {
		Elf_Addr *where;

		assert(ELF_R_TYPE(rel->r_info) == R_ARM_JUMP_SLOT);

		where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
		*where += (Elf_Addr)obj->relocbase;
	}

	return (0);
}
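
/*
 * Illustrative sketch (kept under #if 0, not part of the build): an
 * R_ARM_JUMP_SLOT entry initially holds a link-time address inside the
 * PLT, so the first call re-enters the resolver path; reloc_plt() above
 * only rebases that value by the load address, leaving resolution to lazy
 * binding (or to reloc_jmpslots() below).  The numbers are hypothetical.
 */
#if 0
static void
example_jump_slot_rebase(void)
{
	Elf_Addr relocbase = 0x40000000;	/* object load base */
	Elf_Addr slot = 0x000084f0;		/* link-time GOT entry value */

	slot += relocbase;			/* now a run-time PLT address */
	(void)slot;
}
#endif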

/*
 * LD_BIND_NOW was set - force relocation for all jump slots.
 */
int
reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
	const Obj_Entry *defobj;
	const Elf_Rel *rellim;
	const Elf_Rel *rel;
	const Elf_Sym *def;
	Elf_Addr *where;
	Elf_Addr target;

	rellim = (const Elf_Rel *)((const char *)obj->pltrel + obj->pltrelsize);
	for (rel = obj->pltrel; rel < rellim; rel++) {
		assert(ELF_R_TYPE(rel->r_info) == R_ARM_JUMP_SLOT);
		where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
		def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj,
		    SYMLOOK_IN_PLT | flags, NULL, lockstate);
		if (def == NULL) {
			dbg("reloc_jmpslots: sym not found");
			return (-1);
		}

		target = (Elf_Addr)(defobj->relocbase + def->st_value);
		reloc_jmpslot(where, target, defobj, obj,
		    (const Elf_Rel *) rel);
	}

	obj->jmpslots_done = true;

	return (0);
}

int
reloc_iresolve(Obj_Entry *obj __unused,
    struct Struct_RtldLockState *lockstate __unused)
{

	/* XXX not implemented */
	return (0);
}

int
reloc_iresolve_nonplt(Obj_Entry *obj __unused,
    struct Struct_RtldLockState *lockstate __unused)
{

	/* XXX not implemented */
	return (0);
}

int
reloc_gnu_ifunc(Obj_Entry *obj __unused, int flags __unused,
    struct Struct_RtldLockState *lockstate __unused)
{

	/* XXX not implemented */
	return (0);
}

Elf_Addr
reloc_jmpslot(Elf_Addr *where, Elf_Addr target,
    const Obj_Entry *defobj __unused, const Obj_Entry *obj __unused,
    const Elf_Rel *rel)
{

	assert(ELF_R_TYPE(rel->r_info) == R_ARM_JUMP_SLOT);

	if (*where != target && !ld_bind_not)
		*where = target;
	return (target);
}

void
ifunc_init(Elf_Auxinfo aux_info[__min_size(AT_COUNT)] __unused)
{

}

void
allocate_initial_tls(Obj_Entry *objs)
{
	/*
	 * Fix the size of the static TLS block by using the maximum
	 * offset allocated so far and adding a bit for dynamic modules to
	 * use.
	 */

	tls_static_space = tls_last_offset + tls_last_size +
	    RTLD_STATIC_TLS_EXTRA;

	_tcb_set(allocate_tls(objs, NULL, TLS_TCB_SIZE, TLS_TCB_ALIGN));
}
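
/*
 * Illustrative sketch (kept under #if 0, not part of the build): the
 * static TLS block is sized from the highest offset handed out so far
 * plus RTLD_STATIC_TLS_EXTRA, which leaves room for later dlopen()ed
 * objects that still need static TLS.  The numbers are hypothetical.
 */
#if 0
static void
example_static_tls_size(void)
{
	size_t last_offset = 0x100;	/* stand-in for tls_last_offset */
	size_t last_size = 0x40;	/* stand-in for tls_last_size */

	/* tls_static_space becomes 0x140 + RTLD_STATIC_TLS_EXTRA. */
	(void)(last_offset + last_size + RTLD_STATIC_TLS_EXTRA);
}
#endif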

void *
__tls_get_addr(tls_index *ti)
{
	uintptr_t **dtvp;

	dtvp = &_tcb_get()->tcb_dtv;
	return (tls_get_addr_common(dtvp, ti->ti_module, ti->ti_offset));
}
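
/*
 * Illustrative sketch (kept under #if 0, not part of the build):
 * __tls_get_addr() is the entry point for general-dynamic TLS accesses.
 * A shared library defining a __thread variable, as below, ends up
 * calling it with a tls_index naming the module and the variable's
 * offset within that module's TLS segment.  Names are hypothetical.
 */
#if 0
/* libexample.so */
static __thread int example_counter;

int
example_bump(void)
{
	/*
	 * The compiler materializes a tls_index for example_counter and
	 * calls __tls_get_addr() to obtain its per-thread address.
	 */
	return (++example_counter);
}
#endif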
471