xref: /freebsd/stand/common/load_elf.c (revision 19261079)
1 /*-
2  * Copyright (c) 1998 Michael Smith <msmith@freebsd.org>
3  * Copyright (c) 1998 Peter Wemm <peter@freebsd.org>
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include <sys/param.h>
32 #include <sys/endian.h>
33 #include <sys/exec.h>
34 #include <sys/linker.h>
35 #include <sys/module.h>
36 #include <sys/stdint.h>
37 #include <string.h>
38 #include <machine/elf.h>
39 #include <stand.h>
40 #define FREEBSD_ELF
41 #include <sys/link_elf.h>
42 #include <gfx_fb.h>
43 
44 #include "bootstrap.h"
45 
46 #define COPYOUT(s,d,l)	archsw.arch_copyout((vm_offset_t)(s), d, l)
47 
48 #if defined(__i386__) && __ELF_WORD_SIZE == 64
49 #undef ELF_TARG_CLASS
50 #undef ELF_TARG_MACH
51 #define ELF_TARG_CLASS  ELFCLASS64
52 #define ELF_TARG_MACH   EM_X86_64
53 #endif
54 
55 typedef struct elf_file {
56 	Elf_Phdr	*ph;
57 	Elf_Ehdr	*ehdr;
58 	Elf_Sym		*symtab;
59 	Elf_Hashelt	*hashtab;
60 	Elf_Hashelt	nbuckets;
61 	Elf_Hashelt	nchains;
62 	Elf_Hashelt	*buckets;
63 	Elf_Hashelt	*chains;
64 	Elf_Rel	*rel;
65 	size_t	relsz;
66 	Elf_Rela	*rela;
67 	size_t	relasz;
68 	char	*strtab;
69 	size_t	strsz;
70 	int		fd;
71 	caddr_t	firstpage;
72 	size_t	firstlen;
73 	int		kernel;
74 	uint64_t	off;
75 #ifdef LOADER_VERIEXEC_VECTX
76 	struct vectx	*vctx;
77 #endif
78 } *elf_file_t;
79 
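/*
 * When LOADER_VERIEXEC_VECTX is enabled, reads and seeks on the image go
 * through the vectx handle (VECTX_READ/VECTX_LSEEK below) so the bytes can
 * be verified when vectx_close() is called; otherwise the plain file
 * descriptor is used directly.
 */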
80 #ifdef LOADER_VERIEXEC_VECTX
81 #define VECTX_HANDLE(ef) (ef)->vctx
82 #else
83 #define VECTX_HANDLE(ef) (ef)->fd
84 #endif
85 
86 static int __elfN(loadimage)(struct preloaded_file *mp, elf_file_t ef,
87     uint64_t loadaddr);
88 static int __elfN(lookup_symbol)(elf_file_t ef, const char* name,
89     Elf_Sym *sym, unsigned char type);
90 static int __elfN(reloc_ptr)(struct preloaded_file *mp, elf_file_t ef,
91     Elf_Addr p, void *val, size_t len);
92 static int __elfN(parse_modmetadata)(struct preloaded_file *mp, elf_file_t ef,
93     Elf_Addr p_start, Elf_Addr p_end);
94 static bool __elfN(parse_vt_drv_set)(struct preloaded_file *mp, elf_file_t ef,
95     Elf_Addr p_start, Elf_Addr p_end);
96 static symaddr_fn __elfN(symaddr);
97 static char	*fake_modname(const char *name);
98 
99 const char	*__elfN(kerneltype) = "elf kernel";
100 const char	*__elfN(moduletype) = "elf module";
101 
102 uint64_t	__elfN(relocation_offset) = 0;
103 
104 extern void elf_wrong_field_size(void);
105 #define CONVERT_FIELD(b, f, e)			\
106 	switch (sizeof((b)->f)) {		\
107 	case 2:					\
108 		(b)->f = e ## 16toh((b)->f);	\
109 		break;				\
110 	case 4:					\
111 		(b)->f = e ## 32toh((b)->f);	\
112 		break;				\
113 	case 8:					\
114 		(b)->f = e ## 64toh((b)->f);	\
115 		break;				\
116 	default:				\
117 		/* Force a link time error. */	\
118 		elf_wrong_field_size();		\
119 		break;				\
120 	}
121 
122 #define CONVERT_SWITCH(h, d, f)			\
123 	switch ((h)->e_ident[EI_DATA]) {	\
124 	case ELFDATA2MSB:			\
125 		f(d, be);			\
126 		break;				\
127 	case ELFDATA2LSB:			\
128 		f(d, le);			\
129 		break;				\
130 	default:				\
131 		return (EINVAL);		\
132 	}
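/*
 * For a 2-byte field such as e_type, CONVERT_FIELD(ehdr, e_type, be)
 * reduces to "ehdr->e_type = be16toh(ehdr->e_type)"; CONVERT_SWITCH picks
 * the be/le variant from e_ident[EI_DATA], so each field is converted from
 * the file's byte order to host byte order.
 */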
133 
134 
135 static int elf_header_convert(Elf_Ehdr *ehdr)
136 {
137 	/*
138 	 * Fix up the ELF header endianness.
139 	 *
140 	 * The Xhdr structure was loaded using a block read call to optimize
141 	 * file accesses.  It might happen that the endianness of the system
142 	 * memory is different from the endianness of the ELF header.  Swap
143 	 * the fields here to guarantee that the Xhdr always contains valid
144 	 * data regardless of architecture.
145 	 */
146 #define HEADER_FIELDS(b, e)			\
147 	CONVERT_FIELD(b, e_type, e);		\
148 	CONVERT_FIELD(b, e_machine, e);		\
149 	CONVERT_FIELD(b, e_version, e);		\
150 	CONVERT_FIELD(b, e_entry, e);		\
151 	CONVERT_FIELD(b, e_phoff, e);		\
152 	CONVERT_FIELD(b, e_shoff, e);		\
153 	CONVERT_FIELD(b, e_flags, e);		\
154 	CONVERT_FIELD(b, e_ehsize, e);		\
155 	CONVERT_FIELD(b, e_phentsize, e);	\
156 	CONVERT_FIELD(b, e_phnum, e);		\
157 	CONVERT_FIELD(b, e_shentsize, e);	\
158 	CONVERT_FIELD(b, e_shnum, e);		\
159 	CONVERT_FIELD(b, e_shstrndx, e)
160 
161 	CONVERT_SWITCH(ehdr, ehdr, HEADER_FIELDS);
162 
163 #undef HEADER_FIELDS
164 
165 	return (0);
166 }
167 
168 static int elf_program_header_convert(const Elf_Ehdr *ehdr, Elf_Phdr *phdr)
169 {
170 #define PROGRAM_HEADER_FIELDS(b, e)		\
171 	CONVERT_FIELD(b, p_type, e);		\
172 	CONVERT_FIELD(b, p_flags, e);		\
173 	CONVERT_FIELD(b, p_offset, e);		\
174 	CONVERT_FIELD(b, p_vaddr, e);		\
175 	CONVERT_FIELD(b, p_paddr, e);		\
176 	CONVERT_FIELD(b, p_filesz, e);		\
177 	CONVERT_FIELD(b, p_memsz, e);		\
178 	CONVERT_FIELD(b, p_align, e)
179 
180 	CONVERT_SWITCH(ehdr, phdr, PROGRAM_HEADER_FIELDS);
181 
182 #undef PROGRAM_HEADER_FIELDS
183 
184 	return (0);
185 }
186 
187 static int elf_section_header_convert(const Elf_Ehdr *ehdr, Elf_Shdr *shdr)
188 {
189 #define SECTION_HEADER_FIELDS(b, e)		\
190 	CONVERT_FIELD(b, sh_name, e);		\
191 	CONVERT_FIELD(b, sh_type, e);		\
192 	CONVERT_FIELD(b, sh_link, e);		\
193 	CONVERT_FIELD(b, sh_info, e);		\
194 	CONVERT_FIELD(b, sh_flags, e);		\
195 	CONVERT_FIELD(b, sh_addr, e);		\
196 	CONVERT_FIELD(b, sh_offset, e);		\
197 	CONVERT_FIELD(b, sh_size, e);		\
198 	CONVERT_FIELD(b, sh_addralign, e);	\
199 	CONVERT_FIELD(b, sh_entsize, e)
200 
201 	CONVERT_SWITCH(ehdr, shdr, SECTION_HEADER_FIELDS);
202 
203 #undef SECTION_HEADER_FIELDS
204 
205 	return (0);
206 }
207 #undef CONVERT_SWITCH
208 #undef CONVERT_FIELD
209 
210 
211 #ifdef __amd64__
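/*
 * A kernel exporting an 8-byte "kernphys" symbol is treated as physically
 * relocatable; loadfile_raw() records the result in f_kernphys_relocatable.
 */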
212 static bool
213 is_kernphys_relocatable(elf_file_t ef)
214 {
215 	Elf_Sym sym;
216 
217 	return (__elfN(lookup_symbol)(ef, "kernphys", &sym, STT_OBJECT) == 0 &&
218 	    sym.st_size == 8);
219 }
220 #endif
221 
222 static int
223 __elfN(load_elf_header)(char *filename, elf_file_t ef)
224 {
225 	ssize_t			 bytes_read;
226 	Elf_Ehdr		*ehdr;
227 	int			 err;
228 
229 	/*
230 	 * Open the image, read and validate the ELF header
231 	 */
232 	if (filename == NULL)	/* can't handle nameless */
233 		return (EFTYPE);
234 	if ((ef->fd = open(filename, O_RDONLY)) == -1)
235 		return (errno);
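	/*
	 * Cache the first page of the file: the ELF header and program
	 * headers are parsed directly from this buffer, and loadimage()
	 * copies any segment bytes that fall inside it instead of
	 * re-reading them from the media.
	 */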
236 	ef->firstpage = malloc(PAGE_SIZE);
237 	if (ef->firstpage == NULL) {
238 		close(ef->fd);
239 		return (ENOMEM);
240 	}
241 #ifdef LOADER_VERIEXEC_VECTX
242 	{
243 		int verror;
244 
245 		ef->vctx = vectx_open(ef->fd, filename, 0L, NULL, &verror, __func__);
246 		if (verror) {
247 			printf("Unverified %s: %s\n", filename, ve_error_get());
248 			close(ef->fd);
249 			free(ef->vctx);
250 			return (EAUTH);
251 		}
252 	}
253 #endif
254 	bytes_read = VECTX_READ(VECTX_HANDLE(ef), ef->firstpage, PAGE_SIZE);
255 	ef->firstlen = (size_t)bytes_read;
256 	if (bytes_read < 0 || ef->firstlen <= sizeof(Elf_Ehdr)) {
257 		err = EFTYPE; /* could be EIO, but may be small file */
258 		goto error;
259 	}
260 	ehdr = ef->ehdr = (Elf_Ehdr *)ef->firstpage;
261 
262 	/* Is it ELF? */
263 	if (!IS_ELF(*ehdr)) {
264 		err = EFTYPE;
265 		goto error;
266 	}
267 
268 	if (ehdr->e_ident[EI_CLASS] != ELF_TARG_CLASS || /* Layout ? */
269 	    ehdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
270 	    ehdr->e_ident[EI_VERSION] != EV_CURRENT) /* Version ? */ {
271 		err = EFTYPE;
272 		goto error;
273 	}
274 
275 	err = elf_header_convert(ehdr);
276 	if (err)
277 		goto error;
278 
279 	if (ehdr->e_version != EV_CURRENT || ehdr->e_machine != ELF_TARG_MACH) {
280 		/* Machine ? */
281 		err = EFTYPE;
282 		goto error;
283 	}
284 
285 #if defined(LOADER_VERIEXEC) && !defined(LOADER_VERIEXEC_VECTX)
286 	if (verify_file(ef->fd, filename, bytes_read, VE_MUST, __func__) < 0) {
287 		err = EAUTH;
288 		goto error;
289 	}
290 #endif
291 	return (0);
292 
293 error:
294 	if (ef->firstpage != NULL) {
295 		free(ef->firstpage);
296 		ef->firstpage = NULL;
297 	}
298 	if (ef->fd != -1) {
299 #ifdef LOADER_VERIEXEC_VECTX
300 		free(ef->vctx);
301 #endif
302 		close(ef->fd);
303 		ef->fd = -1;
304 	}
305 	return (err);
306 }
307 
308 /*
309  * Attempt to load the file (filename) as an ELF module.  It will be stored at
310  * (dest), and a pointer to a module structure describing the loaded object
311  * will be saved in (result).
312  */
313 int
314 __elfN(loadfile)(char *filename, uint64_t dest, struct preloaded_file **result)
315 {
316 	return (__elfN(loadfile_raw)(filename, dest, result, 0));
317 }
318 
319 int
320 __elfN(loadfile_raw)(char *filename, uint64_t dest,
321     struct preloaded_file **result, int multiboot)
322 {
323 	struct preloaded_file	*fp, *kfp;
324 	struct elf_file		ef;
325 	Elf_Ehdr		*ehdr;
326 	int			err;
327 
328 	fp = NULL;
329 	bzero(&ef, sizeof(struct elf_file));
330 	ef.fd = -1;
331 
332 	err = __elfN(load_elf_header)(filename, &ef);
333 	if (err != 0)
334 		return (err);
335 
336 	ehdr = ef.ehdr;
337 
338 	/*
339 	 * Check to see what sort of module we are.
340 	 */
341 	kfp = file_findfile(NULL, __elfN(kerneltype));
342 #ifdef __powerpc__
343 	/*
344 	 * Kernels can be ET_DYN, so just assume the first loaded object is the
345 	 * kernel. This assumption will be checked later.
346 	 */
347 	if (kfp == NULL)
348 		ef.kernel = 1;
349 #endif
350 	if (ef.kernel || ehdr->e_type == ET_EXEC) {
351 		/* Looks like a kernel */
352 		if (kfp != NULL) {
353 			printf("elf" __XSTRING(__ELF_WORD_SIZE)
354 			    "_loadfile: kernel already loaded\n");
355 			err = EPERM;
356 			goto oerr;
357 		}
358 		/*
359 		 * Calculate destination address based on kernel entrypoint.
360 		 *
361 		 * For ARM, the destination address is independent of any values
362 		 * in the elf header (an ARM kernel can be loaded at any 2MB
363 		 * boundary), so we leave dest set to the value calculated by
364 		 * archsw.arch_loadaddr() and passed in to this function.
365 		 */
366 #ifndef __arm__
367 		if (ehdr->e_type == ET_EXEC)
368 			dest = (ehdr->e_entry & ~PAGE_MASK);
369 #endif
370 		if ((ehdr->e_entry & ~PAGE_MASK) == 0) {
371 			printf("elf" __XSTRING(__ELF_WORD_SIZE)
372 			    "_loadfile: not a kernel (maybe static binary?)\n");
373 			err = EPERM;
374 			goto oerr;
375 		}
376 		ef.kernel = 1;
377 
378 	} else if (ehdr->e_type == ET_DYN) {
379 		/* Looks like a kld module */
380 		if (multiboot != 0) {
381 			printf("elf" __XSTRING(__ELF_WORD_SIZE)
382 			    "_loadfile: can't load module as multiboot\n");
383 			err = EPERM;
384 			goto oerr;
385 		}
386 		if (kfp == NULL) {
387 			printf("elf" __XSTRING(__ELF_WORD_SIZE)
388 			    "_loadfile: can't load module before kernel\n");
389 			err = EPERM;
390 			goto oerr;
391 		}
392 		if (strcmp(__elfN(kerneltype), kfp->f_type)) {
393 			printf("elf" __XSTRING(__ELF_WORD_SIZE)
394 			 "_loadfile: can't load module with kernel type '%s'\n",
395 			    kfp->f_type);
396 			err = EPERM;
397 			goto oerr;
398 		}
399 		/* Looks OK, go ahead */
400 		ef.kernel = 0;
401 
402 	} else {
403 		err = EFTYPE;
404 		goto oerr;
405 	}
406 
407 	if (archsw.arch_loadaddr != NULL)
408 		dest = archsw.arch_loadaddr(LOAD_ELF, ehdr, dest);
409 	else
410 		dest = roundup(dest, PAGE_SIZE);
411 
412 	/*
413 	 * Ok, we think we should handle this.
414 	 */
415 	fp = file_alloc();
416 	if (fp == NULL) {
417 		printf("elf" __XSTRING(__ELF_WORD_SIZE)
418 		    "_loadfile: cannot allocate module info\n");
419 		err = EPERM;
420 		goto out;
421 	}
422 	if (ef.kernel == 1 && multiboot == 0)
423 		setenv("kernelname", filename, 1);
424 	fp->f_name = strdup(filename);
425 	if (multiboot == 0)
426 		fp->f_type = strdup(ef.kernel ?
427 		    __elfN(kerneltype) : __elfN(moduletype));
428 	else
429 		fp->f_type = strdup("elf multiboot kernel");
430 
431 #ifdef ELF_VERBOSE
432 	if (ef.kernel)
433 		printf("%s entry at 0x%jx\n", filename,
434 		    (uintmax_t)ehdr->e_entry);
435 #else
436 	printf("%s ", filename);
437 #endif
438 
439 	fp->f_size = __elfN(loadimage)(fp, &ef, dest);
440 	if (fp->f_size == 0 || fp->f_addr == 0)
441 		goto ioerr;
442 
443 	/* save exec header as metadata */
444 	file_addmetadata(fp, MODINFOMD_ELFHDR, sizeof(*ehdr), ehdr);
445 
446 	/* Load OK, return module pointer */
447 	*result = (struct preloaded_file *)fp;
448 	err = 0;
449 #ifdef __amd64__
450 	fp->f_kernphys_relocatable = multiboot || is_kernphys_relocatable(&ef);
451 #endif
452 	goto out;
453 
454 ioerr:
455 	err = EIO;
456 oerr:
457 	file_discard(fp);
458 out:
459 	if (ef.firstpage)
460 		free(ef.firstpage);
461 	if (ef.fd != -1) {
462 #ifdef LOADER_VERIEXEC_VECTX
463 		if (!err && ef.vctx) {
464 			int verror;
465 
466 			verror = vectx_close(ef.vctx, VE_MUST, __func__);
467 			if (verror) {
468 				err = EAUTH;
469 				file_discard(fp);
470 			}
471 		}
472 #endif
473 		close(ef.fd);
474 	}
475 	return (err);
476 }
477 
478 /*
479  * With the file open on the image and (ef) holding the parsed ELF
480  * header, load the image at offset (off)
481  */
482 static int
483 __elfN(loadimage)(struct preloaded_file *fp, elf_file_t ef, uint64_t off)
484 {
485 	int		i;
486 	u_int		j;
487 	Elf_Ehdr	*ehdr;
488 	Elf_Phdr	*phdr, *php;
489 	Elf_Shdr	*shdr;
490 	char		*shstr;
491 	int		ret;
492 	vm_offset_t	firstaddr;
493 	vm_offset_t	lastaddr;
494 	size_t		chunk;
495 	ssize_t		result;
496 	Elf_Addr	ssym, esym;
497 	Elf_Dyn		*dp;
498 	Elf_Addr	adp;
499 	Elf_Addr	ctors;
500 	int		ndp;
501 	int		symstrindex;
502 	int		symtabindex;
503 	Elf_Size	size;
504 	u_int		fpcopy;
505 	Elf_Sym		sym;
506 	Elf_Addr	p_start, p_end;
507 
508 	dp = NULL;
509 	shdr = NULL;
510 	ret = 0;
511 	firstaddr = lastaddr = 0;
512 	ehdr = ef->ehdr;
513 #ifdef __powerpc__
514 	if (ef->kernel) {
515 #else
516 	if (ehdr->e_type == ET_EXEC) {
517 #endif
518 #if defined(__i386__) || defined(__amd64__)
519 #if __ELF_WORD_SIZE == 64
520 		/* x86_64 relocates after locore */
521 		off = - (off & 0xffffffffff000000ull);
522 #else
523 		/* i386 relocates after locore */
524 		off = - (off & 0xff000000u);
525 #endif
526 #elif defined(__powerpc__)
527 		/*
528 		 * On the purely virtual memory machines like e500, the kernel
529 		 * is linked against its final VA range, which is most often
530 		 * not available at the loader stage, but only after kernel
531 		 * initializes and completes its VM settings. In such cases we
532 		 * cannot use p_vaddr field directly to load ELF segments, but
533 		 * put them at some 'load-time' locations.
534 		 */
535 		if (off & 0xf0000000u) {
536 			off = -(off & 0xf0000000u);
537 			/*
538 			 * XXX the physical load address should not be
539 			 * hardcoded. Note that the Book-E kernel assumes that
540 			 * it's loaded at a 16MB boundary for now...
541 			 */
542 			off += 0x01000000;
543 		}
544 		ehdr->e_entry += off;
545 #ifdef ELF_VERBOSE
546 		printf("Converted entry 0x%jx\n", (uintmax_t)ehdr->e_entry);
547 #endif
548 #elif defined(__arm__) && !defined(EFI)
549 		/*
550 		 * The elf headers in arm kernels specify virtual addresses in
551 		 * all header fields, even the ones that should be physical
552 		 * addresses.  We assume the entry point is in the first page,
553 		 * and masking the page offset will leave us with the virtual
554 		 * address the kernel was linked at.  We subtract that from the
555 		 * load offset, making 'off' into the value which, when added
556 		 * to a virtual address in an elf header, translates it to a
557 		 * physical address.  We do the va->pa conversion on the entry
558 		 * point address in the header now, so that later we can launch
559 		 * the kernel by just jumping to that address.
560 		 *
561 		 * When booting from UEFI the copyin and copyout functions
562 		 * handle adjusting the location relative to the first virtual
563 		 * address.  Because of this there is no need to adjust the
564 		 * offset or entry point address as these will both be handled
565 		 * by the efi code.
566 		 */
567 		off -= ehdr->e_entry & ~PAGE_MASK;
568 		ehdr->e_entry += off;
569 #ifdef ELF_VERBOSE
570 		printf("ehdr->e_entry 0x%jx, va<->pa off %llx\n",
571 		    (uintmax_t)ehdr->e_entry, off);
572 #endif
573 #else
574 		off = 0;	/* other archs use direct mapped kernels */
575 #endif
576 	}
577 	ef->off = off;
578 
579 	if (ef->kernel)
580 		__elfN(relocation_offset) = off;
581 
582 	if ((ehdr->e_phoff + ehdr->e_phnum * sizeof(*phdr)) > ef->firstlen) {
583 		printf("elf" __XSTRING(__ELF_WORD_SIZE)
584 		    "_loadimage: program header not within first page\n");
585 		goto out;
586 	}
587 	phdr = (Elf_Phdr *)(ef->firstpage + ehdr->e_phoff);
588 
589 	for (i = 0; i < ehdr->e_phnum; i++) {
590 		if (elf_program_header_convert(ehdr, &phdr[i]))
591 			continue;
592 
593 		/* We want to load PT_LOAD segments only. */
594 		if (phdr[i].p_type != PT_LOAD)
595 			continue;
596 
597 #ifdef ELF_VERBOSE
598 		printf("Segment: 0x%lx@0x%lx -> 0x%lx-0x%lx",
599 		    (long)phdr[i].p_filesz, (long)phdr[i].p_offset,
600 		    (long)(phdr[i].p_vaddr + off),
601 		    (long)(phdr[i].p_vaddr + off + phdr[i].p_memsz - 1));
602 #else
603 		if ((phdr[i].p_flags & PF_W) == 0) {
604 			printf("text=0x%lx ", (long)phdr[i].p_filesz);
605 		} else {
606 			printf("data=0x%lx", (long)phdr[i].p_filesz);
607 			if (phdr[i].p_filesz < phdr[i].p_memsz)
608 				printf("+0x%lx", (long)(phdr[i].p_memsz -
609 				    phdr[i].p_filesz));
610 			printf(" ");
611 		}
612 #endif
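		/*
		 * Part of this segment may already be in the cached first
		 * page; copy that portion from memory, then read the rest of
		 * p_filesz into place from the file.
		 */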
613 		fpcopy = 0;
614 		if (ef->firstlen > phdr[i].p_offset) {
615 			fpcopy = ef->firstlen - phdr[i].p_offset;
616 			archsw.arch_copyin(ef->firstpage + phdr[i].p_offset,
617 			    phdr[i].p_vaddr + off, fpcopy);
618 		}
619 		if (phdr[i].p_filesz > fpcopy) {
620 			if (kern_pread(VECTX_HANDLE(ef),
621 			    phdr[i].p_vaddr + off + fpcopy,
622 			    phdr[i].p_filesz - fpcopy,
623 			    phdr[i].p_offset + fpcopy) != 0) {
624 				printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
625 				    "_loadimage: read failed\n");
626 				goto out;
627 			}
628 		}
629 		/* clear space from oversized segments; e.g., bss */
630 		if (phdr[i].p_filesz < phdr[i].p_memsz) {
631 #ifdef ELF_VERBOSE
632 			printf(" (bss: 0x%lx-0x%lx)",
633 			    (long)(phdr[i].p_vaddr + off + phdr[i].p_filesz),
634 			    (long)(phdr[i].p_vaddr + off + phdr[i].p_memsz -1));
635 #endif
636 
637 			kern_bzero(phdr[i].p_vaddr + off + phdr[i].p_filesz,
638 			    phdr[i].p_memsz - phdr[i].p_filesz);
639 		}
640 #ifdef ELF_VERBOSE
641 		printf("\n");
642 #endif
643 
644 		if (archsw.arch_loadseg != NULL)
645 			archsw.arch_loadseg(ehdr, phdr + i, off);
646 
647 		if (firstaddr == 0 || firstaddr > (phdr[i].p_vaddr + off))
648 			firstaddr = phdr[i].p_vaddr + off;
649 		if (lastaddr == 0 || lastaddr <
650 		    (phdr[i].p_vaddr + off + phdr[i].p_memsz))
651 			lastaddr = phdr[i].p_vaddr + off + phdr[i].p_memsz;
652 	}
653 	lastaddr = roundup(lastaddr, sizeof(long));
654 
655 	/*
656 	 * Get the section headers.  We need this for finding the .ctors
657 	 * section as well as for loading any symbols.  Both may be hard
658 	 * to do if reading from a .gz file as it involves seeking.  I
659 	 * think the rule is going to have to be that you must strip a
660 	 * file to remove symbols before gzipping it.
661 	 */
662 	chunk = (size_t)ehdr->e_shnum * (size_t)ehdr->e_shentsize;
663 	if (chunk == 0 || ehdr->e_shoff == 0)
664 		goto nosyms;
665 	shdr = alloc_pread(VECTX_HANDLE(ef), ehdr->e_shoff, chunk);
666 	if (shdr == NULL) {
667 		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
668 		    "_loadimage: failed to read section headers");
669 		goto nosyms;
670 	}
671 
672 	for (i = 0; i < ehdr->e_shnum; i++)
673 		elf_section_header_convert(ehdr, &shdr[i]);
674 
675 	file_addmetadata(fp, MODINFOMD_SHDR, chunk, shdr);
676 
677 	/*
678 	 * Read the section string table and look for the .ctors section.
679 	 * We need to tell the kernel where it is so that it can call the
680 	 * ctors.
681 	 */
682 	chunk = shdr[ehdr->e_shstrndx].sh_size;
683 	if (chunk) {
684 		shstr = alloc_pread(VECTX_HANDLE(ef),
685 		    shdr[ehdr->e_shstrndx].sh_offset, chunk);
686 		if (shstr) {
687 			for (i = 0; i < ehdr->e_shnum; i++) {
688 				if (strcmp(shstr + shdr[i].sh_name,
689 				    ".ctors") != 0)
690 					continue;
691 				ctors = shdr[i].sh_addr;
692 				file_addmetadata(fp, MODINFOMD_CTORS_ADDR,
693 				    sizeof(ctors), &ctors);
694 				size = shdr[i].sh_size;
695 				file_addmetadata(fp, MODINFOMD_CTORS_SIZE,
696 				    sizeof(size), &size);
697 				break;
698 			}
699 			free(shstr);
700 		}
701 	}
702 
703 	/*
704 	 * Now load any symbols.
705 	 */
706 	symtabindex = -1;
707 	symstrindex = -1;
708 	for (i = 0; i < ehdr->e_shnum; i++) {
709 		if (shdr[i].sh_type != SHT_SYMTAB)
710 			continue;
711 		for (j = 0; j < ehdr->e_phnum; j++) {
712 			if (phdr[j].p_type != PT_LOAD)
713 				continue;
714 			if (shdr[i].sh_offset >= phdr[j].p_offset &&
715 			    (shdr[i].sh_offset + shdr[i].sh_size <=
716 			    phdr[j].p_offset + phdr[j].p_filesz)) {
717 				shdr[i].sh_offset = 0;
718 				shdr[i].sh_size = 0;
719 				break;
720 			}
721 		}
722 		if (shdr[i].sh_offset == 0 || shdr[i].sh_size == 0)
723 			continue;	/* already loaded in a PT_LOAD above */
724 		/* Save it for loading below */
725 		symtabindex = i;
726 		symstrindex = shdr[i].sh_link;
727 	}
728 	if (symtabindex < 0 || symstrindex < 0)
729 		goto nosyms;
730 
731 	/* Ok, committed to a load. */
732 #ifndef ELF_VERBOSE
733 	printf("syms=[");
734 #endif
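	/*
	 * Append the symbol table and its string table after the loaded
	 * segments, each prefixed by an Elf_Size length word.  ssym/esym
	 * bracket the blob and are handed to the kernel below as the
	 * MODINFOMD_SSYM/MODINFOMD_ESYM metadata.
	 */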
735 	ssym = lastaddr;
736 	for (i = symtabindex; i >= 0; i = symstrindex) {
737 #ifdef ELF_VERBOSE
738 		char	*secname;
739 
740 		switch(shdr[i].sh_type) {
741 		case SHT_SYMTAB:		/* Symbol table */
742 			secname = "symtab";
743 			break;
744 		case SHT_STRTAB:		/* String table */
745 			secname = "strtab";
746 			break;
747 		default:
748 			secname = "WHOA!!";
749 			break;
750 		}
751 #endif
752 		size = shdr[i].sh_size;
753 #if defined(__powerpc__)
754   #if __ELF_WORD_SIZE == 64
755 		size = htobe64(size);
756   #else
757 		size = htobe32(size);
758   #endif
759 #endif
760 
761 		archsw.arch_copyin(&size, lastaddr, sizeof(size));
762 		lastaddr += sizeof(size);
763 
764 #ifdef ELF_VERBOSE
765 		printf("\n%s: 0x%jx@0x%jx -> 0x%jx-0x%jx", secname,
766 		    (uintmax_t)shdr[i].sh_size, (uintmax_t)shdr[i].sh_offset,
767 		    (uintmax_t)lastaddr,
768 		    (uintmax_t)(lastaddr + shdr[i].sh_size));
769 #else
770 		if (i == symstrindex)
771 			printf("+");
772 		printf("0x%lx+0x%lx", (long)sizeof(size), (long)size);
773 #endif
774 
775 		if (VECTX_LSEEK(VECTX_HANDLE(ef), (off_t)shdr[i].sh_offset, SEEK_SET) == -1) {
776 			printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
777 			   "_loadimage: could not seek for symbols - skipped!");
778 			lastaddr = ssym;
779 			ssym = 0;
780 			goto nosyms;
781 		}
782 		result = archsw.arch_readin(VECTX_HANDLE(ef), lastaddr, shdr[i].sh_size);
783 		if (result < 0 || (size_t)result != shdr[i].sh_size) {
784 			printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
785 			    "_loadimage: could not read symbols - skipped! "
786 			    "(%ju != %ju)", (uintmax_t)result,
787 			    (uintmax_t)shdr[i].sh_size);
788 			lastaddr = ssym;
789 			ssym = 0;
790 			goto nosyms;
791 		}
792 		/* Reset offsets relative to ssym */
793 		lastaddr += shdr[i].sh_size;
794 		lastaddr = roundup(lastaddr, sizeof(size));
795 		if (i == symtabindex)
796 			symtabindex = -1;
797 		else if (i == symstrindex)
798 			symstrindex = -1;
799 	}
800 	esym = lastaddr;
801 #ifndef ELF_VERBOSE
802 	printf("]");
803 #endif
804 
805 #if defined(__powerpc__)
806   /* On PowerPC we always need to provide BE data to the kernel */
807   #if __ELF_WORD_SIZE == 64
808 	ssym = htobe64((uint64_t)ssym);
809 	esym = htobe64((uint64_t)esym);
810   #else
811 	ssym = htobe32((uint32_t)ssym);
812 	esym = htobe32((uint32_t)esym);
813   #endif
814 #endif
815 
816 	file_addmetadata(fp, MODINFOMD_SSYM, sizeof(ssym), &ssym);
817 	file_addmetadata(fp, MODINFOMD_ESYM, sizeof(esym), &esym);
818 
819 nosyms:
820 	printf("\n");
821 
822 	ret = lastaddr - firstaddr;
823 	fp->f_addr = firstaddr;
824 
825 	php = NULL;
826 	for (i = 0; i < ehdr->e_phnum; i++) {
827 		if (phdr[i].p_type == PT_DYNAMIC) {
828 			php = phdr + i;
829 			adp = php->p_vaddr;
830 			file_addmetadata(fp, MODINFOMD_DYNAMIC, sizeof(adp),
831 			    &adp);
832 			break;
833 		}
834 	}
835 
836 	if (php == NULL) /* this is bad, we cannot get to symbols or _DYNAMIC */
837 		goto out;
838 
839 	ndp = php->p_filesz / sizeof(Elf_Dyn);
840 	if (ndp == 0)
841 		goto out;
842 	dp = malloc(php->p_filesz);
843 	if (dp == NULL)
844 		goto out;
845 	archsw.arch_copyout(php->p_vaddr + off, dp, php->p_filesz);
846 
847 	ef->strsz = 0;
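	/*
	 * Scan the _DYNAMIC entries for the tables needed later: DT_HASH,
	 * DT_SYMTAB and DT_STRTAB/DT_STRSZ for symbol lookups, plus
	 * DT_REL(SZ)/DT_RELA(SZ) so reloc_ptr() can apply intra-module
	 * relocations when linker-set entries are read back.
	 */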
848 	for (i = 0; i < ndp; i++) {
849 		if (dp[i].d_tag == 0)
850 			break;
851 		switch (dp[i].d_tag) {
852 		case DT_HASH:
853 			ef->hashtab =
854 			    (Elf_Hashelt*)(uintptr_t)(dp[i].d_un.d_ptr + off);
855 			break;
856 		case DT_STRTAB:
857 			ef->strtab =
858 			    (char *)(uintptr_t)(dp[i].d_un.d_ptr + off);
859 			break;
860 		case DT_STRSZ:
861 			ef->strsz = dp[i].d_un.d_val;
862 			break;
863 		case DT_SYMTAB:
864 			ef->symtab =
865 			    (Elf_Sym *)(uintptr_t)(dp[i].d_un.d_ptr + off);
866 			break;
867 		case DT_REL:
868 			ef->rel =
869 			    (Elf_Rel *)(uintptr_t)(dp[i].d_un.d_ptr + off);
870 			break;
871 		case DT_RELSZ:
872 			ef->relsz = dp[i].d_un.d_val;
873 			break;
874 		case DT_RELA:
875 			ef->rela =
876 			    (Elf_Rela *)(uintptr_t)(dp[i].d_un.d_ptr + off);
877 			break;
878 		case DT_RELASZ:
879 			ef->relasz = dp[i].d_un.d_val;
880 			break;
881 		default:
882 			break;
883 		}
884 	}
885 	if (ef->hashtab == NULL || ef->symtab == NULL ||
886 	    ef->strtab == NULL || ef->strsz == 0)
887 		goto out;
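	/*
	 * The DT_HASH table starts with the nbucket and nchain counts,
	 * followed by the bucket array and then the chain array; record
	 * pointers to each for lookup_symbol().
	 */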
888 	COPYOUT(ef->hashtab, &ef->nbuckets, sizeof(ef->nbuckets));
889 	COPYOUT(ef->hashtab + 1, &ef->nchains, sizeof(ef->nchains));
890 	ef->buckets = ef->hashtab + 2;
891 	ef->chains = ef->buckets + ef->nbuckets;
892 
893 	if (!gfx_state.tg_kernel_supported &&
894 	    __elfN(lookup_symbol)(ef, "__start_set_vt_drv_set", &sym,
895 	    STT_NOTYPE) == 0) {
896 		p_start = sym.st_value + ef->off;
897 		if (__elfN(lookup_symbol)(ef, "__stop_set_vt_drv_set", &sym,
898 		    STT_NOTYPE) == 0) {
899 			p_end = sym.st_value + ef->off;
900 			gfx_state.tg_kernel_supported =
901 			    __elfN(parse_vt_drv_set)(fp, ef, p_start, p_end);
902 		}
903 	}
904 
905 	if (__elfN(lookup_symbol)(ef, "__start_set_modmetadata_set", &sym,
906 	    STT_NOTYPE) != 0)
907 		return 0;
908 	p_start = sym.st_value + ef->off;
909 	if (__elfN(lookup_symbol)(ef, "__stop_set_modmetadata_set", &sym,
910 	    STT_NOTYPE) != 0)
911 		return 0;
912 	p_end = sym.st_value + ef->off;
913 
914 	if (__elfN(parse_modmetadata)(fp, ef, p_start, p_end) == 0)
915 		goto out;
916 
917 	if (ef->kernel)		/* kernel must not depend on anything */
918 		goto out;
919 
920 out:
921 	if (dp)
922 		free(dp);
923 	if (shdr)
924 		free(shdr);
925 	return ret;
926 }
927 
928 static char invalid_name[] = "bad";
929 
930 char *
931 fake_modname(const char *name)
932 {
933 	const char *sp, *ep;
934 	char *fp;
935 	size_t len;
936 
937 	sp = strrchr(name, '/');
938 	if (sp)
939 		sp++;
940 	else
941 		sp = name;
942 
943 	ep = strrchr(sp, '.');
944 	if (ep == NULL) {
945 		ep = sp + strlen(sp);
946 	}
947 	if (ep == sp) {
948 		sp = invalid_name;
949 		ep = invalid_name + sizeof(invalid_name) - 1;
950 	}
951 
952 	len = ep - sp;
953 	fp = malloc(len + 1);
954 	if (fp == NULL)
955 		return NULL;
956 	memcpy(fp, sp, len);
957 	fp[len] = '\0';
958 	return fp;
959 }
960 
961 #if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
962 struct mod_metadata64 {
963 	int		md_version;	/* structure version MDTV_* */
964 	int		md_type;	/* type of entry MDT_* */
965 	uint64_t	md_data;	/* specific data */
966 	uint64_t	md_cval;	/* common string label */
967 };
968 #endif
969 #if defined(__amd64__) && __ELF_WORD_SIZE == 32
970 struct mod_metadata32 {
971 	int		md_version;	/* structure version MDTV_* */
972 	int		md_type;	/* type of entry MDT_* */
973 	uint32_t	md_data;	/* specific data */
974 	uint32_t	md_cval;	/* common string label */
975 };
976 #endif
977 
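/*
 * Load just enough of an ELF object to process its set_modmetadata_set:
 * the linker set itself plus the .data and .rodata sections it points
 * into are staged at (dest), and ef.off is set up so the set's pointers
 * resolve within that staging area before parse_modmetadata() runs.
 */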
978 int
979 __elfN(load_modmetadata)(struct preloaded_file *fp, uint64_t dest)
980 {
981 	struct elf_file		 ef;
982 	int			 err, i, j;
983 	Elf_Shdr		*sh_meta, *shdr = NULL;
984 	Elf_Shdr		*sh_data[2];
985 	char			*shstrtab = NULL;
986 	size_t			 size;
987 	Elf_Addr		 p_start, p_end;
988 
989 	bzero(&ef, sizeof(struct elf_file));
990 	ef.fd = -1;
991 
992 	err = __elfN(load_elf_header)(fp->f_name, &ef);
993 	if (err != 0)
994 		goto out;
995 
996 	if (ef.kernel == 1 || ef.ehdr->e_type == ET_EXEC) {
997 		ef.kernel = 1;
998 	} else if (ef.ehdr->e_type != ET_DYN) {
999 		err = EFTYPE;
1000 		goto out;
1001 	}
1002 
1003 	size = (size_t)ef.ehdr->e_shnum * (size_t)ef.ehdr->e_shentsize;
1004 	shdr = alloc_pread(VECTX_HANDLE(&ef), ef.ehdr->e_shoff, size);
1005 	if (shdr == NULL) {
1006 		err = ENOMEM;
1007 		goto out;
1008 	}
1009 
1010 	/* Load shstrtab. */
1011 	shstrtab = alloc_pread(VECTX_HANDLE(&ef), shdr[ef.ehdr->e_shstrndx].sh_offset,
1012 	    shdr[ef.ehdr->e_shstrndx].sh_size);
1013 	if (shstrtab == NULL) {
1014 		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1015 		    "_load_modmetadata: unable to load shstrtab\n");
1016 		err = EFTYPE;
1017 		goto out;
1018 	}
1019 
1020 	/* Find set_modmetadata_set and data sections. */
1021 	sh_data[0] = sh_data[1] = sh_meta = NULL;
1022 	for (i = 0, j = 0; i < ef.ehdr->e_shnum; i++) {
1023 		if (strcmp(&shstrtab[shdr[i].sh_name],
1024 		    "set_modmetadata_set") == 0) {
1025 			sh_meta = &shdr[i];
1026 		}
1027 		if ((strcmp(&shstrtab[shdr[i].sh_name], ".data") == 0) ||
1028 		    (strcmp(&shstrtab[shdr[i].sh_name], ".rodata") == 0)) {
1029 			sh_data[j++] = &shdr[i];
1030 		}
1031 	}
1032 	if (sh_meta == NULL || sh_data[0] == NULL || sh_data[1] == NULL) {
1033 		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1034     "_load_modmetadata: unable to find set_modmetadata_set or data sections\n");
1035 		err = EFTYPE;
1036 		goto out;
1037 	}
1038 
1039 	/* Load set_modmetadata_set into memory */
1040 	err = kern_pread(VECTX_HANDLE(&ef), dest, sh_meta->sh_size, sh_meta->sh_offset);
1041 	if (err != 0) {
1042 		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1043     "_load_modmetadata: unable to load set_modmetadata_set: %d\n", err);
1044 		goto out;
1045 	}
1046 	p_start = dest;
1047 	p_end = dest + sh_meta->sh_size;
1048 	dest += sh_meta->sh_size;
1049 
1050 	/* Load data sections into memory. */
1051 	err = kern_pread(VECTX_HANDLE(&ef), dest, sh_data[0]->sh_size,
1052 	    sh_data[0]->sh_offset);
1053 	if (err != 0) {
1054 		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1055 		    "_load_modmetadata: unable to load data: %d\n", err);
1056 		goto out;
1057 	}
1058 
1059 	/*
1060 	 * We have to increment dest so that the offset is the same into
1061 	 * both the .rodata and .data sections.
1062 	 */
1063 	ef.off = -(sh_data[0]->sh_addr - dest);
1064 	dest +=	(sh_data[1]->sh_addr - sh_data[0]->sh_addr);
1065 
1066 	err = kern_pread(VECTX_HANDLE(&ef), dest, sh_data[1]->sh_size,
1067 	    sh_data[1]->sh_offset);
1068 	if (err != 0) {
1069 		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1070 		    "_load_modmetadata: unable to load data: %d\n", err);
1071 		goto out;
1072 	}
1073 
1074 	err = __elfN(parse_modmetadata)(fp, &ef, p_start, p_end);
1075 	if (err != 0) {
1076 		printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1077 		    "_load_modmetadata: unable to parse metadata: %d\n", err);
1078 		goto out;
1079 	}
1080 
1081 out:
1082 	if (shstrtab != NULL)
1083 		free(shstrtab);
1084 	if (shdr != NULL)
1085 		free(shdr);
1086 	if (ef.firstpage != NULL)
1087 		free(ef.firstpage);
1088 	if (ef.fd != -1) {
1089 #ifdef LOADER_VERIEXEC_VECTX
1090 		if (!err && ef.vctx) {
1091 			int verror;
1092 
1093 			verror = vectx_close(ef.vctx, VE_MUST, __func__);
1094 			if (verror) {
1095 				err = EAUTH;
1096 				file_discard(fp);
1097 			}
1098 		}
1099 #endif
1100 		close(ef.fd);
1101 	}
1102 	return (err);
1103 }
1104 
1105 /*
1106  * Walk through vt_drv_set; each vt driver structure starts with a
1107  * static 16-char driver name.  If we find "vbefb", return true.
1108  */
1109 static bool
1110 __elfN(parse_vt_drv_set)(struct preloaded_file *fp, elf_file_t ef,
1111     Elf_Addr p_start, Elf_Addr p_end)
1112 {
1113 	Elf_Addr v, p;
1114 	char vd_name[16];
1115 	int error;
1116 
1117 	p = p_start;
1118 	while (p < p_end) {
1119 		COPYOUT(p, &v, sizeof(v));
1120 
1121 		error = __elfN(reloc_ptr)(fp, ef, p, &v, sizeof(v));
1122 		if (error == EOPNOTSUPP)
1123 			v += ef->off;
1124 		else if (error != 0)
1125 			return (false);
1126 		COPYOUT(v, &vd_name, sizeof(vd_name));
1127 		if (strncmp(vd_name, "vbefb", sizeof(vd_name)) == 0)
1128 			return (true);
1129 		p += sizeof(Elf_Addr);
1130 	}
1131 
1132 	return (false);
1133 }
1134 
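/*
 * Walk the set_modmetadata_set linker set; each entry points at a struct
 * mod_metadata.  MDT_VERSION entries register the module name and version
 * via file_addmodule(), MDT_DEPEND entries are attached as
 * MODINFOMD_DEPLIST metadata (skipped for kernels).  If no version entry
 * is found, a module name derived from the file name is registered.
 */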
1135 int
1136 __elfN(parse_modmetadata)(struct preloaded_file *fp, elf_file_t ef,
1137     Elf_Addr p_start, Elf_Addr p_end)
1138 {
1139 	struct mod_metadata md;
1140 #if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
1141 	struct mod_metadata64 md64;
1142 #elif defined(__amd64__) && __ELF_WORD_SIZE == 32
1143 	struct mod_metadata32 md32;
1144 #endif
1145 	struct mod_depend *mdepend;
1146 	struct mod_version mver;
1147 	char *s;
1148 	int error, modcnt, minfolen;
1149 	Elf_Addr v, p;
1150 
1151 	modcnt = 0;
1152 	p = p_start;
1153 	while (p < p_end) {
1154 		COPYOUT(p, &v, sizeof(v));
1155 		error = __elfN(reloc_ptr)(fp, ef, p, &v, sizeof(v));
1156 		if (error == EOPNOTSUPP)
1157 			v += ef->off;
1158 		else if (error != 0)
1159 			return (error);
1160 #if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
1161 		COPYOUT(v, &md64, sizeof(md64));
1162 		error = __elfN(reloc_ptr)(fp, ef, v, &md64, sizeof(md64));
1163 		if (error == EOPNOTSUPP) {
1164 			md64.md_cval += ef->off;
1165 			md64.md_data += ef->off;
1166 		} else if (error != 0)
1167 			return (error);
1168 		md.md_version = md64.md_version;
1169 		md.md_type = md64.md_type;
1170 		md.md_cval = (const char *)(uintptr_t)md64.md_cval;
1171 		md.md_data = (void *)(uintptr_t)md64.md_data;
1172 #elif defined(__amd64__) && __ELF_WORD_SIZE == 32
1173 		COPYOUT(v, &md32, sizeof(md32));
1174 		error = __elfN(reloc_ptr)(fp, ef, v, &md32, sizeof(md32));
1175 		if (error == EOPNOTSUPP) {
1176 			md32.md_cval += ef->off;
1177 			md32.md_data += ef->off;
1178 		} else if (error != 0)
1179 			return (error);
1180 		md.md_version = md32.md_version;
1181 		md.md_type = md32.md_type;
1182 		md.md_cval = (const char *)(uintptr_t)md32.md_cval;
1183 		md.md_data = (void *)(uintptr_t)md32.md_data;
1184 #else
1185 		COPYOUT(v, &md, sizeof(md));
1186 		error = __elfN(reloc_ptr)(fp, ef, v, &md, sizeof(md));
1187 		if (error == EOPNOTSUPP) {
1188 			md.md_cval += ef->off;
1189 			md.md_data = (void *)((uintptr_t)md.md_data +
1190 			    (uintptr_t)ef->off);
1191 		} else if (error != 0)
1192 			return (error);
1193 #endif
1194 		p += sizeof(Elf_Addr);
1195 		switch(md.md_type) {
1196 		case MDT_DEPEND:
1197 			if (ef->kernel) /* kernel must not depend on anything */
1198 				break;
1199 			s = strdupout((vm_offset_t)md.md_cval);
1200 			minfolen = sizeof(*mdepend) + strlen(s) + 1;
1201 			mdepend = malloc(minfolen);
1202 			if (mdepend == NULL)
1203 				return ENOMEM;
1204 			COPYOUT((vm_offset_t)md.md_data, mdepend,
1205 			    sizeof(*mdepend));
1206 			strcpy((char*)(mdepend + 1), s);
1207 			free(s);
1208 			file_addmetadata(fp, MODINFOMD_DEPLIST, minfolen,
1209 			    mdepend);
1210 			free(mdepend);
1211 			break;
1212 		case MDT_VERSION:
1213 			s = strdupout((vm_offset_t)md.md_cval);
1214 			COPYOUT((vm_offset_t)md.md_data, &mver, sizeof(mver));
1215 			file_addmodule(fp, s, mver.mv_version, NULL);
1216 			free(s);
1217 			modcnt++;
1218 			break;
1219 		}
1220 	}
1221 	if (modcnt == 0) {
1222 		s = fake_modname(fp->f_name);
1223 		file_addmodule(fp, s, 1, NULL);
1224 		free(s);
1225 	}
1226 	return 0;
1227 }
1228 
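/*
 * The standard SysV ELF hash function; lookup_symbol() uses
 * elf_hash(name) % nbuckets to pick the bucket whose chain it walks.
 */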
1229 static unsigned long
1230 elf_hash(const char *name)
1231 {
1232 	const unsigned char *p = (const unsigned char *) name;
1233 	unsigned long h = 0;
1234 	unsigned long g;
1235 
1236 	while (*p != '\0') {
1237 		h = (h << 4) + *p++;
1238 		if ((g = h & 0xf0000000) != 0)
1239 			h ^= g >> 24;
1240 		h &= ~g;
1241 	}
1242 	return h;
1243 }
1244 
1245 static const char __elfN(bad_symtable)[] = "elf" __XSTRING(__ELF_WORD_SIZE)
1246     "_lookup_symbol: corrupt symbol table\n";
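/*
 * Look up (name) in the image's dynamic symbol table using the hash
 * chains set up in loadimage().  A match is accepted if the symbol is
 * defined (st_shndx != SHN_UNDEF) or has a nonzero value of the requested
 * type.
 */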
1247 int
1248 __elfN(lookup_symbol)(elf_file_t ef, const char* name, Elf_Sym *symp,
1249     unsigned char type)
1250 {
1251 	Elf_Hashelt symnum;
1252 	Elf_Sym sym;
1253 	char *strp;
1254 	unsigned long hash;
1255 
1256 	if (ef->nbuckets == 0) {
1257 		printf(__elfN(bad_symtable));
1258 		return ENOENT;
1259 	}
1260 
1261 	hash = elf_hash(name);
1262 	COPYOUT(&ef->buckets[hash % ef->nbuckets], &symnum, sizeof(symnum));
1263 
1264 	while (symnum != STN_UNDEF) {
1265 		if (symnum >= ef->nchains) {
1266 			printf(__elfN(bad_symtable));
1267 			return ENOENT;
1268 		}
1269 
1270 		COPYOUT(ef->symtab + symnum, &sym, sizeof(sym));
1271 		if (sym.st_name == 0) {
1272 			printf(__elfN(bad_symtable));
1273 			return ENOENT;
1274 		}
1275 
1276 		strp = strdupout((vm_offset_t)(ef->strtab + sym.st_name));
1277 		if (strcmp(name, strp) == 0) {
1278 			free(strp);
1279 			if (sym.st_shndx != SHN_UNDEF ||
1280 			    (sym.st_value != 0 &&
1281 			    ELF_ST_TYPE(sym.st_info) == type)) {
1282 				*symp = sym;
1283 				return 0;
1284 			}
1285 			return ENOENT;
1286 		}
1287 		free(strp);
1288 		COPYOUT(&ef->chains[symnum], &symnum, sizeof(symnum));
1289 	}
1290 	return ENOENT;
1291 }
1292 
1293 /*
1294  * Apply any intra-module relocations to the value. p is the load address
1295  * of the value and val/len is the value to be modified. This does NOT modify
1296  * the image in-place, because this is done by kern_linker later on.
1297  *
1298  * Returns EOPNOTSUPP if no relocation method is supplied.
1299  */
1300 static int
1301 __elfN(reloc_ptr)(struct preloaded_file *mp, elf_file_t ef,
1302     Elf_Addr p, void *val, size_t len)
1303 {
1304 	size_t n;
1305 	Elf_Rela a;
1306 	Elf_Rel r;
1307 	int error;
1308 
1309 	/*
1310 	 * The kernel is already relocated, but we still want to apply
1311 	 * offset adjustments.
1312 	 */
1313 	if (ef->kernel)
1314 		return (EOPNOTSUPP);
1315 
1316 	for (n = 0; n < ef->relsz / sizeof(r); n++) {
1317 		COPYOUT(ef->rel + n, &r, sizeof(r));
1318 
1319 		error = __elfN(reloc)(ef, __elfN(symaddr), &r, ELF_RELOC_REL,
1320 		    ef->off, p, val, len);
1321 		if (error != 0)
1322 			return (error);
1323 	}
1324 	for (n = 0; n < ef->relasz / sizeof(a); n++) {
1325 		COPYOUT(ef->rela + n, &a, sizeof(a));
1326 
1327 		error = __elfN(reloc)(ef, __elfN(symaddr), &a, ELF_RELOC_RELA,
1328 		    ef->off, p, val, len);
1329 		if (error != 0)
1330 			return (error);
1331 	}
1332 
1333 	return (0);
1334 }
1335 
1336 static Elf_Addr
1337 __elfN(symaddr)(struct elf_file *ef, Elf_Size symidx)
1338 {
1339 
1340 	/* Symbol lookup by index not required here. */
1341 	return (0);
1342 }
1343