/*	$NetBSD: mdreloc.c,v 1.23 2003/07/26 15:04:38 mrg Exp $	*/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/mman.h>

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "machine/sysarch.h"

#include "debug.h"
#include "rtld.h"
#include "paths.h"

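/*
 * Pick the soft-float or hard-float run-time environment for the main
 * program, based on the EF_ARM_VFP_FLOAT flag the kernel passes in the
 * AT_EHDRFLAGS auxiliary vector entry.
 */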
void
arm_abi_variant_hook(Elf_Auxinfo **aux_info)
{
	Elf_Word ehdr;

	/*
	 * If we're running an old kernel that doesn't provide any data,
	 * fail safe by doing nothing.
	 */
	if (aux_info[AT_EHDRFLAGS] == NULL)
		return;
	ehdr = aux_info[AT_EHDRFLAGS]->a_un.a_val;

	/*
	 * Hard float ABI binaries are the default and use the default paths
	 * and settings.
	 */
	if ((ehdr & EF_ARM_VFP_FLOAT) != 0)
		return;

	/*
	 * This is a soft float ABI binary, so switch to the soft float
	 * settings.  For the moment the standard library path includes the
	 * hard float paths as well: while upgrading we may have to run a
	 * binary of the other kind until the new binaries are installed.
	 * We could instead key off whether /libsoft exists, but the
	 * simplicity of keeping it in the path wins.
	 */
	ld_elf_hints_default = _PATH_SOFT_ELF_HINTS;
	ld_path_libmap_conf = _PATH_SOFT_LIBMAP_CONF;
	ld_path_rtld = _PATH_SOFT_RTLD;
	ld_standard_library_path = SOFT_STANDARD_LIBRARY_PATH;
	ld_env_prefix = LD_SOFT_;
}

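/*
 * Prepare the GOT for lazy binding: GOT[1] holds the Obj_Entry pointer for
 * this object and GOT[2] the address of _rtld_bind_start, which the PLT
 * header jumps through to enter the run-time binder.
 */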
void
init_pltgot(Obj_Entry *obj)
{
	if (obj->pltgot != NULL) {
		obj->pltgot[1] = (Elf_Addr) obj;
		obj->pltgot[2] = (Elf_Addr) &_rtld_bind_start;
	}
}

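/*
 * Process the R_ARM_COPY relocations of the main program: for each copied
 * symbol, find the shared object that defines it and copy the initialized
 * data into the space reserved in the executable.
 */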
int
do_copy_relocations(Obj_Entry *dstobj)
{
	const Elf_Rel *rellim;
	const Elf_Rel *rel;

	assert(dstobj->mainprog);	/* COPY relocations are invalid elsewhere */

	rellim = (const Elf_Rel *) ((caddr_t) dstobj->rel + dstobj->relsize);
	for (rel = dstobj->rel;  rel < rellim;  rel++) {
		if (ELF_R_TYPE(rel->r_info) == R_ARM_COPY) {
			void *dstaddr;
			const Elf_Sym *dstsym;
			const char *name;
			size_t size;
			const void *srcaddr;
			const Elf_Sym *srcsym;
			const Obj_Entry *srcobj, *defobj;
			SymLook req;
			int res;

			dstaddr = (void *) (dstobj->relocbase + rel->r_offset);
			dstsym = dstobj->symtab + ELF_R_SYM(rel->r_info);
			name = dstobj->strtab + dstsym->st_name;
			size = dstsym->st_size;

			symlook_init(&req, name);
			req.ventry = fetch_ventry(dstobj,
			    ELF_R_SYM(rel->r_info));
			req.flags = SYMLOOK_EARLY;

			for (srcobj = dstobj->next;  srcobj != NULL;
			     srcobj = srcobj->next) {
				res = symlook_obj(&req, srcobj);
				if (res == 0) {
					srcsym = req.sym_out;
					defobj = req.defobj_out;
					break;
				}
			}
			if (srcobj == NULL) {
				_rtld_error(
"Undefined symbol \"%s\" referenced from COPY relocation in %s",
				    name, dstobj->path);
				return (-1);
			}

			srcaddr = (const void *)(defobj->relocbase +
			    srcsym->st_value);
			memcpy(dstaddr, srcaddr, size);
		}
	}
	return 0;
}

void _rtld_bind_start(void);
void _rtld_relocate_nonplt_self(Elf_Dyn *, Elf_Addr);

int open();
int _open();
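/*
 * Relocate rtld itself before any of its own global data may be used.
 * Only DT_REL/DT_RELSZ are consulted, and every entry is assumed to be an
 * R_ARM_RELATIVE relocation, so the load base is simply added in place.
 */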
void
_rtld_relocate_nonplt_self(Elf_Dyn *dynp, Elf_Addr relocbase)
{
	const Elf_Rel *rel = NULL, *rellim;
	Elf_Addr relsz = 0;
	Elf_Addr *where;

	for (; dynp->d_tag != DT_NULL; dynp++) {
		switch (dynp->d_tag) {
		case DT_REL:
			rel = (const Elf_Rel *)(relocbase + dynp->d_un.d_ptr);
			break;
		case DT_RELSZ:
			relsz = dynp->d_un.d_val;
			break;
		}
	}
	rellim = (const Elf_Rel *)((caddr_t)rel + relsz);
	for (; rel < rellim; rel++) {
		where = (Elf_Addr *)(relocbase + rel->r_offset);

		*where += (Elf_Addr)relocbase;
	}
}
/*
 * It is possible for the compiler to emit relocations for unaligned data.
 * We handle this situation with these inlines.
 */
#define	RELOC_ALIGNED_P(x) \
	(((uintptr_t)(x) & (sizeof(void *) - 1)) == 0)

static __inline Elf_Addr
load_ptr(void *where)
{
	Elf_Addr res;

	memcpy(&res, where, sizeof(res));

	return (res);
}

static __inline void
store_ptr(void *where, Elf_Addr val)
{

	memcpy(where, &val, sizeof(val));
}

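/*
 * Apply a single non-PLT relocation.  Returns 0 on success and -1 on
 * failure.  Targets that may be unaligned are accessed through the
 * load_ptr()/store_ptr() helpers above.
 */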
static int
reloc_nonplt_object(Obj_Entry *obj, const Elf_Rel *rel, SymCache *cache,
    int flags, RtldLockState *lockstate)
{
	Elf_Addr        *where;
	const Elf_Sym   *def;
	const Obj_Entry *defobj;
	Elf_Addr         tmp;
	unsigned long	 symnum;

	where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
	symnum = ELF_R_SYM(rel->r_info);

	switch (ELF_R_TYPE(rel->r_info)) {
	case R_ARM_NONE:
		break;

#if 1 /* XXX should not occur */
	case R_ARM_PC24: {	/* word32 S - P + A */
		Elf32_Sword addend;

		/*
		 * Extract addend and sign-extend if needed.
		 */
		addend = *where;
		if (addend & 0x00800000)
			addend |= 0xff000000;

		def = find_symdef(symnum, obj, &defobj, flags, cache,
		    lockstate);
		if (def == NULL)
			return -1;
		tmp = (Elf_Addr)obj->relocbase + def->st_value
		    - (Elf_Addr)where + (addend << 2);
		if ((tmp & 0xfe000000) != 0xfe000000 &&
		    (tmp & 0xfe000000) != 0) {
			_rtld_error(
			"%s: R_ARM_PC24 relocation @ %p to %s failed "
			"(displacement %ld (%#lx) out of range)",
			    obj->path, where,
			    obj->strtab + obj->symtab[symnum].st_name,
			    (long) tmp, (long) tmp);
			return -1;
		}
		tmp >>= 2;
		*where = (*where & 0xff000000) | (tmp & 0x00ffffff);
		dbg("PC24 %s in %s --> %p @ %p in %s",
		    obj->strtab + obj->symtab[symnum].st_name,
		    obj->path, (void *)*where, where, defobj->path);
		break;
	}
#endif

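	/*
	 * ABS32 and GLOB_DAT resolve to the symbol's absolute address:
	 * the defining object's load base plus the symbol value, plus the
	 * addend already stored at the target.
	 */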
	case R_ARM_ABS32:	/* word32 B + S + A */
	case R_ARM_GLOB_DAT:	/* word32 B + S */
		def = find_symdef(symnum, obj, &defobj, flags, cache,
		    lockstate);
		if (def == NULL)
			return -1;
		if (__predict_true(RELOC_ALIGNED_P(where))) {
			tmp = *where + (Elf_Addr)defobj->relocbase +
			    def->st_value;
			*where = tmp;
		} else {
			tmp = load_ptr(where) +
			    (Elf_Addr)defobj->relocbase +
			    def->st_value;
			store_ptr(where, tmp);
		}
		dbg("ABS32/GLOB_DAT %s in %s --> %p @ %p in %s",
		    obj->strtab + obj->symtab[symnum].st_name,
		    obj->path, (void *)tmp, where, defobj->path);
		break;

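	/*
	 * RELATIVE entries carry no symbol; the new value is this object's
	 * load base plus the addend stored at the target.
	 */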
	case R_ARM_RELATIVE:	/* word32 B + A */
		if (__predict_true(RELOC_ALIGNED_P(where))) {
			tmp = *where + (Elf_Addr)obj->relocbase;
			*where = tmp;
		} else {
			tmp = load_ptr(where) +
			    (Elf_Addr)obj->relocbase;
			store_ptr(where, tmp);
		}
		dbg("RELATIVE in %s --> %p", obj->path,
		    (void *)tmp);
		break;

	case R_ARM_COPY:
		/*
		 * These are deferred until all other relocations have
		 * been done.  All we do here is make sure that the
		 * COPY relocation is not in a shared library.  They
		 * are allowed only in executable files.
		 */
		if (!obj->mainprog) {
			_rtld_error(
		"%s: Unexpected R_COPY relocation in shared library",
			    obj->path);
			return -1;
		}
		dbg("COPY (avoid in main)");
		break;

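	/*
	 * TLS relocations: DTPOFF32 yields the symbol's offset within its
	 * module's TLS block, DTPMOD32 the defining module's index for
	 * __tls_get_addr(), and TPOFF32 a static offset from the thread
	 * pointer for objects placed in the static TLS block.
	 */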
	case R_ARM_TLS_DTPOFF32:
		def = find_symdef(symnum, obj, &defobj, flags, cache,
		    lockstate);
		if (def == NULL)
			return -1;

		tmp = (Elf_Addr)(def->st_value);
		if (__predict_true(RELOC_ALIGNED_P(where)))
			*where = tmp;
		else
			store_ptr(where, tmp);

		dbg("TLS_DTPOFF32 %s in %s --> %p",
		    obj->strtab + obj->symtab[symnum].st_name,
		    obj->path, (void *)tmp);

		break;
	case R_ARM_TLS_DTPMOD32:
		def = find_symdef(symnum, obj, &defobj, flags, cache,
		    lockstate);
		if (def == NULL)
			return -1;

		tmp = (Elf_Addr)(defobj->tlsindex);
		if (__predict_true(RELOC_ALIGNED_P(where)))
			*where = tmp;
		else
			store_ptr(where, tmp);

		dbg("TLS_DTPMOD32 %s in %s --> %p",
		    obj->strtab + obj->symtab[symnum].st_name,
		    obj->path, (void *)tmp);

		break;

	case R_ARM_TLS_TPOFF32:
		def = find_symdef(symnum, obj, &defobj, flags, cache,
		    lockstate);
		if (def == NULL)
			return -1;

		if (!defobj->tls_done &&
		    !allocate_tls_offset(__DECONST(Obj_Entry *, defobj)))
			return -1;

		/* XXX: FIXME */
		tmp = (Elf_Addr)def->st_value + defobj->tlsoffset +
		    TLS_TCB_SIZE;
		if (__predict_true(RELOC_ALIGNED_P(where)))
			*where = tmp;
		else
			store_ptr(where, tmp);
		dbg("TLS_TPOFF32 %s in %s --> %p",
		    obj->strtab + obj->symtab[symnum].st_name,
		    obj->path, (void *)tmp);
		break;

	default:
		dbg("sym = %lu, type = %lu, offset = %p, "
		    "contents = %p, symbol = %s",
		    symnum, (u_long)ELF_R_TYPE(rel->r_info),
		    (void *)rel->r_offset, (void *)load_ptr(where),
		    obj->strtab + obj->symtab[symnum].st_name);
		_rtld_error("%s: Unsupported relocation type %ld "
		    "in non-PLT relocations\n",
		    obj->path, (u_long) ELF_R_TYPE(rel->r_info));
		return -1;
	}
	return 0;
}

/*
 * Process non-PLT relocations.
 */
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
    RtldLockState *lockstate)
{
	const Elf_Rel *rellim;
	const Elf_Rel *rel;
	SymCache *cache;
	int r = -1;

	/* The relocation for the dynamic loader has already been done. */
	if (obj == obj_rtld)
		return (0);
	if ((flags & SYMLOOK_IFUNC) != 0)
		/* XXX not implemented */
		return (0);

	/*
	 * The dynamic loader may be called from a thread; since only a
	 * limited amount of stack is available, we cannot use alloca().
	 */
	cache = calloc(obj->dynsymcount, sizeof(SymCache));
	/* No need to check for NULL here */

	rellim = (const Elf_Rel *)((caddr_t)obj->rel + obj->relsize);
	for (rel = obj->rel; rel < rellim; rel++) {
		if (reloc_nonplt_object(obj, rel, cache, flags, lockstate) < 0)
			goto done;
	}
	r = 0;
done:
	if (cache != NULL)
		free(cache);
	return (r);
}

/*
 * Process the PLT relocations.
 */
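/*
 * Binding is lazy here: each R_ARM_JUMP_SLOT entry is only adjusted by the
 * load base, so the slot normally still points back into the PLT and the
 * first call through it enters the binder.  reloc_jmpslot() stores the
 * resolved address later, or reloc_jmpslots() does so up front when
 * LD_BIND_NOW is set.
 */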
int
reloc_plt(Obj_Entry *obj)
{
	const Elf_Rel *rellim;
	const Elf_Rel *rel;

	rellim = (const Elf_Rel *)((char *)obj->pltrel +
	    obj->pltrelsize);
	for (rel = obj->pltrel;  rel < rellim;  rel++) {
		Elf_Addr *where;

		assert(ELF_R_TYPE(rel->r_info) == R_ARM_JUMP_SLOT);

		where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
		*where += (Elf_Addr)obj->relocbase;
	}

	return (0);
}

/*
 * LD_BIND_NOW was set - force relocation for all jump slots.
 */
int
reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
	const Obj_Entry *defobj;
	const Elf_Rel *rellim;
	const Elf_Rel *rel;
	const Elf_Sym *def;
	Elf_Addr *where;
	Elf_Addr target;

	rellim = (const Elf_Rel *)((char *)obj->pltrel + obj->pltrelsize);
	for (rel = obj->pltrel; rel < rellim; rel++) {
		assert(ELF_R_TYPE(rel->r_info) == R_ARM_JUMP_SLOT);
		where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
		def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj,
		    SYMLOOK_IN_PLT | flags, NULL, lockstate);
		if (def == NULL) {
			dbg("reloc_jmpslots: sym not found");
			return (-1);
		}

		target = (Elf_Addr)(defobj->relocbase + def->st_value);
		reloc_jmpslot(where, target, defobj, obj,
		    (const Elf_Rel *) rel);
	}

	obj->jmpslots_done = true;

	return (0);
}

int
reloc_iresolve(Obj_Entry *obj, struct Struct_RtldLockState *lockstate)
{

	/* XXX not implemented */
	return (0);
}

int
reloc_gnu_ifunc(Obj_Entry *obj, int flags,
    struct Struct_RtldLockState *lockstate)
{

	/* XXX not implemented */
	return (0);
}

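/*
 * Fix up a single jump slot with the resolved target address.  Called from
 * the lazy binder as well as from reloc_jmpslots() above.
 */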
Elf_Addr
reloc_jmpslot(Elf_Addr *where, Elf_Addr target, const Obj_Entry *defobj,
    const Obj_Entry *obj, const Elf_Rel *rel)
{

	assert(ELF_R_TYPE(rel->r_info) == R_ARM_JUMP_SLOT);

	if (*where != target)
		*where = target;

	return target;
}

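/*
 * Allocate the static TLS block for the objects present at startup and
 * install the resulting TCB as the thread pointer, either through the
 * ARM_TP_ADDRESS page (CPUs without a hardware thread register) or via
 * sysarch(ARM_SET_TP, ...).
 */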
void
allocate_initial_tls(Obj_Entry *objs)
{
#ifdef ARM_TP_ADDRESS
	void **_tp = (void **)ARM_TP_ADDRESS;
#endif

	/*
	 * Fix the size of the static TLS block by using the maximum
	 * offset allocated so far and adding a bit for dynamic modules to
	 * use.
	 */

	tls_static_space = tls_last_offset + tls_last_size + RTLD_STATIC_TLS_EXTRA;

#ifdef ARM_TP_ADDRESS
	(*_tp) = (void *) allocate_tls(objs, NULL, TLS_TCB_SIZE, 8);
#else
	sysarch(ARM_SET_TP, allocate_tls(objs, NULL, TLS_TCB_SIZE, 8));
#endif
}

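/*
 * TLS ABI entry point: return the address for the (module, offset) pair in
 * *ti.  The DTV is reached through the thread pointer, read either from the
 * ARM_TP_ADDRESS page or from the TPIDRURO register (CP15 c13, c0, 3).
 * For reference, compiler-generated general-dynamic code calls this roughly
 * as:
 *
 *	tls_index ti = { .ti_module = <module id>, .ti_offset = <offset> };
 *	void *p = __tls_get_addr(&ti);
 */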
void *
__tls_get_addr(tls_index *ti)
{
	char *p;
#ifdef ARM_TP_ADDRESS
	void **_tp = (void **)ARM_TP_ADDRESS;

	p = tls_get_addr_common((Elf_Addr **)(*_tp), ti->ti_module,
	    ti->ti_offset);
#else
	void *_tp;
	__asm __volatile("mrc  p15, 0, %0, c13, c0, 3"
	    : "=r" (_tp));
	p = tls_get_addr_common((Elf_Addr **)(_tp), ti->ti_module,
	    ti->ti_offset);
#endif

	return (p);
}